1 /*
   2  * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_LIRAssembler.hpp"
  29 #include "c1/c1_MacroAssembler.hpp"
  30 #include "c1/c1_Runtime1.hpp"
  31 #include "c1/c1_ValueStack.hpp"
  32 #include "ci/ciArrayKlass.hpp"
  33 #include "ci/ciInstance.hpp"
  34 #include "gc/shared/collectedHeap.hpp"
  35 #include "memory/universe.hpp"
  36 #include "nativeInst_ppc.hpp"
  37 #include "oops/compressedOops.hpp"
  38 #include "oops/objArrayKlass.hpp"
  39 #include "runtime/frame.inline.hpp"
  40 #include "runtime/os.inline.hpp"
  41 #include "runtime/safepointMechanism.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/vm_version.hpp"
  45 #include "utilities/macros.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 
  48 #define __ _masm->
  49 
  50 
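// Condition register used to pass compare results from comp_op() to their
// consumers, e.g. emit_opBranch() and cmove().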
  51 const ConditionRegister LIR_Assembler::BOOL_RESULT = CR5;
  52 
  53 
  54 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  55   Unimplemented(); return false; // Currently not used on this platform.
  56 }
  57 
  58 
  59 LIR_Opr LIR_Assembler::receiverOpr() {
  60   return FrameMap::R3_oop_opr;
  61 }
  62 
  63 
  64 LIR_Opr LIR_Assembler::osrBufferPointer() {
  65   return FrameMap::R3_opr;
  66 }
  67 
  68 
  69 // This specifies the stack pointer decrement needed to build the frame.
  70 int LIR_Assembler::initial_frame_size_in_bytes() const {
  71   return in_bytes(frame_map()->framesize_in_bytes());
  72 }
  73 
  74 
// Inline cache check: the inline cached class is in inline_cache_reg;
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
  78 int LIR_Assembler::check_icache() {
  79   return __ ic_check(CodeEntryAlignment);
  80 }
  81 
  82 void LIR_Assembler::clinit_barrier(ciMethod* method) {
  83   assert(!method->holder()->is_not_initialized(), "initialization should have been started");
  84 
  85   Label L_skip_barrier;
  86   Register klass = R20;
  87 
  88   metadata2reg(method->holder()->constant_encoding(), klass);
  89   __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
  90 
  91   __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
  92   __ mtctr(klass);
  93   __ bctr();
  94 
  95   __ bind(L_skip_barrier);
  96 }
  97 
  98 void LIR_Assembler::osr_entry() {
  99   // On-stack-replacement entry sequence:
 100   //
 101   //   1. Create a new compiled activation.
 102   //   2. Initialize local variables in the compiled activation. The expression
 103   //      stack must be empty at the osr_bci; it is not initialized.
 104   //   3. Jump to the continuation address in compiled code to resume execution.
 105 
 106   // OSR entry point
 107   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 108   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 109   ValueStack* entry_state = osr_entry->end()->state();
 110   int number_of_locks = entry_state->locks_size();
 111 
 112   // Create a frame for the compiled activation.
 113   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
 114 
 115   // OSR buffer is
 116   //
 117   // locals[nlocals-1..0]
 118   // monitors[number_of_locks-1..0]
 119   //
  // The locals are a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks: the first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the last lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if the method is
  // synchronized).
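  //
  // Illustrative layout (not emitted; assumes BytesPerWord == 8): with
  // max_locals == 3 and number_of_locks == 2 the OSR buffer is
  //   bytes  0..23 : locals (local[2], local[1], local[0])
  //   bytes 24..39 : entry read for compiled monitor 1 (lock at 24, oop at 32)
  //   bytes 40..55 : entry read for compiled monitor 0 (lock at 40, oop at 48)
  // which matches monitor_offset = 24 + 16 * (2 - 1) and
  // slot_offset = monitor_offset - 16 * i in the code below.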
 127 
 128   // Initialize monitors in the compiled activation.
 129   //   R3: pointer to osr buffer
 130   //
 131   // All other registers are dead at this point and the locals will be
 132   // copied into place by code emitted in the IR.
 133 
 134   Register OSR_buf = osrBufferPointer()->as_register();
 135   {
 136     assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 137 
 138     const int locals_space = BytesPerWord * method()->max_locals();
 139     int monitor_offset = locals_space + (2 * BytesPerWord) * (number_of_locks - 1);
 140     bool use_OSR_bias = false;
 141 
 142     if (!Assembler::is_simm16(monitor_offset + BytesPerWord) && number_of_locks > 0) {
 143       // Offsets too large for ld instructions. Use bias.
 144       __ add_const_optimized(OSR_buf, OSR_buf, locals_space);
 145       monitor_offset -= locals_space;
 146       use_OSR_bias = true;
 147     }
 148 
 149     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 150     // the OSR buffer using 2 word entries: first the lock and then
 151     // the oop.
 152     for (int i = 0; i < number_of_locks; i++) {
 153       int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 154 #ifdef ASSERT
 155       // Verify the interpreter's monitor has a non-null object.
 156       {
 157         Label L;
 158         __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
 159         __ cmpdi(CR0, R0, 0);
 160         __ bne(CR0, L);
 161         __ stop("locked object is null");
 162         __ bind(L);
 163       }
 164 #endif // ASSERT
 165       // Copy the lock field into the compiled activation.
 166       Address ml = frame_map()->address_for_monitor_lock(i),
 167               mo = frame_map()->address_for_monitor_object(i);
 168       assert(ml.index() == noreg && mo.index() == noreg, "sanity");
 169       __ ld(R0, slot_offset + 0, OSR_buf);
 170       __ std(R0, ml);
 171       __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
 172       __ std(R0, mo);
 173     }
 174 
 175     if (use_OSR_bias) {
 176       // Restore.
 177       __ sub_const_optimized(OSR_buf, OSR_buf, locals_space);
 178     }
 179   }
 180 }
 181 
 182 
 183 int LIR_Assembler::emit_exception_handler() {
 184   // Generate code for the exception handler.
 185   address handler_base = __ start_a_stub(exception_handler_size());
 186 
 187   if (handler_base == nullptr) {
 188     // Not enough space left for the handler.
 189     bailout("exception handler overflow");
 190     return -1;
 191   }
 192 
 193   int offset = code_offset();
 194   address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id));
 195   //__ load_const_optimized(R0, entry_point);
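  // Materialize the entry point as an offset from the global TOC (typically
  // shorter than the full 64-bit constant load shown commented out above).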
 196   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
 197   __ mtctr(R0);
 198   __ bctr();
 199 
 200   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 201   __ end_a_stub();
 202 
 203   return offset;
 204 }
 205 
 206 
 207 // Emit the code to remove the frame from the stack in the exception
 208 // unwind path.
 209 int LIR_Assembler::emit_unwind_handler() {
 210   _masm->block_comment("Unwind handler");
 211 
 212   int offset = code_offset();
 213   bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
 214   const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;
 215 
 216   // Fetch the exception from TLS and clear out exception related thread state.
 217   __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 218   __ li(R0, 0);
 219   __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 220   __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
 221 
 222   __ bind(_unwind_handler_entry);
 223   __ verify_not_null_oop(Rexception);
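  // The unlocking and dtrace slow paths below may clobber R3, so keep the
  // exception oop in a nonvolatile register while they run.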
 224   if (preserve_exception) { __ mr(Rexception_save, Rexception); }
 225 
 226   // Perform needed unlocking
 227   MonitorExitStub* stub = nullptr;
 228   if (method()->is_synchronized()) {
 229     monitor_address(0, FrameMap::R4_opr);
 230     stub = new MonitorExitStub(FrameMap::R4_opr, 0);
 231     __ unlock_object(R5, R6, R4, *stub->entry());
 232     __ bind(*stub->continuation());
 233   }
 234 
 235   if (compilation()->env()->dtrace_method_probes()) {
 236     Unimplemented();
 237   }
 238 
 239   // Dispatch to the unwind logic.
 240   address unwind_stub = Runtime1::entry_for(StubId::c1_unwind_exception_id);
 241   //__ load_const_optimized(R0, unwind_stub);
 242   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
 243   if (preserve_exception) { __ mr(Rexception, Rexception_save); }
 244   __ mtctr(R0);
 245   __ bctr();
 246 
 247   // Emit the slow path assembly.
 248   if (stub != nullptr) {
 249     stub->emit_code(this);
 250   }
 251 
 252   return offset;
 253 }
 254 
 255 
 256 int LIR_Assembler::emit_deopt_handler() {
 257   // Generate code for deopt handler.
 258   address handler_base = __ start_a_stub(deopt_handler_size());
 259 
 260   if (handler_base == nullptr) {
 261     // Not enough space left for the handler.
 262     bailout("deopt handler overflow");
 263     return -1;
 264   }
 265 
 266   int offset = code_offset();
 267   Label start;
 268 
 269   __ bind(start);
 270   __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);
 271   int entry_offset = __ offset();
 272   __ b(start);
 273 
 274   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 275   __ end_a_stub();
 276 
 277   return entry_offset;
 278 }
 279 
 280 
 281 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 282   if (o == nullptr) {
 283     __ li(reg, 0);
 284   } else {
 285     AddressLiteral addrlit = __ constant_oop_address(o);
 286     __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
 287   }
 288 }
 289 
 290 
 291 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 292   // Allocate a new index in table to hold the object once it's been patched.
 293   int oop_index = __ oop_recorder()->allocate_oop_index(nullptr);
 294   PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
 295 
 296   AddressLiteral addrlit((address)nullptr, oop_Relocation::spec(oop_index));
 297   __ load_const(reg, addrlit, R0);
 298 
 299   patching_epilog(patch, lir_patch_normal, reg, info);
 300 }
 301 
 302 
 303 void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
 304   AddressLiteral md = __ constant_metadata_address(o); // Notify OOP recorder (don't need the relocation)
 305   __ load_const_optimized(reg, md.value(), (reg != R0) ? R0 : noreg);
 306 }
 307 
 308 
 309 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
 310   // Allocate a new index in table to hold the klass once it's been patched.
 311   int index = __ oop_recorder()->allocate_metadata_index(nullptr);
 312   PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
 313 
 314   AddressLiteral addrlit((address)nullptr, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
 316   __ load_const(reg, addrlit, R0);
 317 
 318   patching_epilog(patch, lir_patch_normal, reg, info);
 319 }
 320 
 321 
 322 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
 323   const bool is_int = result->is_single_cpu();
 324   Register Rdividend = is_int ? left->as_register() : left->as_register_lo();
 325   Register Rdivisor  = noreg;
 326   Register Rscratch  = temp->as_register();
 327   Register Rresult   = is_int ? result->as_register() : result->as_register_lo();
 328   long divisor = -1;
 329 
 330   if (right->is_register()) {
 331     Rdivisor = is_int ? right->as_register() : right->as_register_lo();
 332   } else {
 333     divisor = is_int ? right->as_constant_ptr()->as_jint()
 334                      : right->as_constant_ptr()->as_jlong();
 335   }
 336 
 337   assert(Rdividend != Rscratch, "");
 338   assert(Rdivisor  != Rscratch, "");
 339   assert(code == lir_idiv || code == lir_irem, "Must be irem or idiv");
 340 
 341   if (Rdivisor == noreg) {
    if (divisor == 1) { // Degenerate, but it can happen.
 343       if (code == lir_idiv) {
 344         __ mr_if_needed(Rresult, Rdividend);
 345       } else {
 346         __ li(Rresult, 0);
 347       }
 348 
 349     } else if (is_power_of_2(divisor)) {
 350       // Convert division by a power of two into some shifts and logical operations.
 351       int log2 = log2i_exact(divisor);
 352 
 353       // Round towards 0.
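      // The bias added below is (divisor - 1) for a negative dividend and 0
      // for a non-negative one; it turns the arithmetic right shift (which
      // rounds towards negative infinity) into a round towards zero.
      // Illustrative example: dividend = -7, divisor = 4 (log2 = 2):
      // bias = 3, biased value = -4; idiv: -4 >> 2 = -1 = -7 / 4;
      // irem: -7 - (-4) = -3 = -7 % 4.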
 354       if (divisor == 2) {
 355         if (is_int) {
 356           __ srwi(Rscratch, Rdividend, 31);
 357         } else {
 358           __ srdi(Rscratch, Rdividend, 63);
 359         }
 360       } else {
 361         if (is_int) {
 362           __ srawi(Rscratch, Rdividend, 31);
 363         } else {
 364           __ sradi(Rscratch, Rdividend, 63);
 365         }
 366         __ clrldi(Rscratch, Rscratch, 64-log2);
 367       }
 368       __ add(Rscratch, Rdividend, Rscratch);
 369 
 370       if (code == lir_idiv) {
 371         if (is_int) {
 372           __ srawi(Rresult, Rscratch, log2);
 373         } else {
 374           __ sradi(Rresult, Rscratch, log2);
 375         }
 376       } else { // lir_irem
 377         __ clrrdi(Rscratch, Rscratch, log2);
 378         __ sub(Rresult, Rdividend, Rscratch);
 379       }
 380 
 381     } else if (divisor == -1) {
 382       if (code == lir_idiv) {
 383         __ neg(Rresult, Rdividend);
 384       } else {
 385         __ li(Rresult, 0);
 386       }
 387 
 388     } else {
 389       __ load_const_optimized(Rscratch, divisor);
 390       if (code == lir_idiv) {
 391         if (is_int) {
 392           __ divw(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
 393         } else {
 394           __ divd(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
 395         }
 396       } else {
 397         assert(Rscratch != R0, "need both");
 398         if (is_int) {
 399           __ divw(R0, Rdividend, Rscratch); // Can't divide minint/-1.
 400           __ mullw(Rscratch, R0, Rscratch);
 401         } else {
 402           __ divd(R0, Rdividend, Rscratch); // Can't divide minint/-1.
 403           __ mulld(Rscratch, R0, Rscratch);
 404         }
 405         __ sub(Rresult, Rdividend, Rscratch);
 406       }
 407 
 408     }
 409     return;
 410   }
 411 
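  // A divisor held in a register needs a run-time check for -1: divw/divd
  // cannot handle min_int / -1 (see the comments below), while Java requires
  // min_int / -1 == min_int and x % -1 == 0.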
 412   Label regular, done;
 413   if (is_int) {
 414     __ cmpwi(CR0, Rdivisor, -1);
 415   } else {
 416     __ cmpdi(CR0, Rdivisor, -1);
 417   }
 418   __ bne(CR0, regular);
 419   if (code == lir_idiv) {
 420     __ neg(Rresult, Rdividend);
 421     __ b(done);
 422     __ bind(regular);
 423     if (is_int) {
 424       __ divw(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
 425     } else {
 426       __ divd(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
 427     }
 428   } else { // lir_irem
 429     __ li(Rresult, 0);
 430     __ b(done);
 431     __ bind(regular);
 432     if (is_int) {
 433       __ divw(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
 434       __ mullw(Rscratch, Rscratch, Rdivisor);
 435     } else {
 436       __ divd(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
 437       __ mulld(Rscratch, Rscratch, Rdivisor);
 438     }
 439     __ sub(Rresult, Rdividend, Rscratch);
 440   }
 441   __ bind(done);
 442 }
 443 
 444 
 445 void LIR_Assembler::emit_op3(LIR_Op3* op) {
 446   switch (op->code()) {
 447   case lir_idiv:
 448   case lir_irem:
 449     arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(),
 450                     op->result_opr(), op->info());
 451     break;
 452   case lir_fmad:
 453     __ fmadd(op->result_opr()->as_double_reg(), op->in_opr1()->as_double_reg(),
 454              op->in_opr2()->as_double_reg(), op->in_opr3()->as_double_reg());
 455     break;
 456   case lir_fmaf:
 457     __ fmadds(op->result_opr()->as_float_reg(), op->in_opr1()->as_float_reg(),
 458               op->in_opr2()->as_float_reg(), op->in_opr3()->as_float_reg());
 459     break;
 460   default: ShouldNotReachHere(); break;
 461   }
 462 }
 463 
 464 
 465 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
 466 #ifdef ASSERT
 467   assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
 468   if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
 469   if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
 470   assert(op->info() == nullptr, "shouldn't have CodeEmitInfo");
 471 #endif
 472 
 473   Label *L = op->label();
 474   if (op->cond() == lir_cond_always) {
 475     __ b(*L);
 476   } else {
 477     Label done;
 478     bool is_unordered = false;
 479     if (op->code() == lir_cond_float_branch) {
 480       assert(op->ublock() != nullptr, "must have unordered successor");
 481       is_unordered = true;
 482     } else {
 483       assert(op->code() == lir_branch, "just checking");
 484     }
 485 
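    // Map the LIR condition onto a bit of BOOL_RESULT. Conditions without a
    // CR bit of their own (notEqual, lessEqual, greaterEqual) test the
    // opposite bit and branch on it being clear (positive == false).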
 486     bool positive = false;
 487     Assembler::Condition cond = Assembler::equal;
 488     switch (op->cond()) {
 489       case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; is_unordered = false; break;
 490       case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; is_unordered = false; break;
 491       case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
 492       case lir_cond_belowEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
 493       case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
 494       case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
 495       case lir_cond_aboveEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
 496       case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
 497       default:                    ShouldNotReachHere();
 498     }
 499     int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
 500     int bi = Assembler::bi0(BOOL_RESULT, cond);
 501     if (is_unordered) {
 502       if (positive) {
 503         if (op->ublock() == op->block()) {
 504           __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(BOOL_RESULT, Assembler::summary_overflow), *L);
 505         }
 506       } else {
 507         if (op->ublock() != op->block()) { __ bso(BOOL_RESULT, done); }
 508       }
 509     }
 510     __ bc_far_optimized(bo, bi, *L);
 511     __ bind(done);
 512   }
 513 }
 514 
 515 
 516 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
 517   Bytecodes::Code code = op->bytecode();
 518   LIR_Opr src = op->in_opr(),
 519           dst = op->result_opr();
 520 
 521   switch(code) {
 522     case Bytecodes::_i2l: {
 523       __ extsw(dst->as_register_lo(), src->as_register());
 524       break;
 525     }
 526     case Bytecodes::_l2i: {
 527       __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
 528       break;
 529     }
 530     case Bytecodes::_i2b: {
 531       __ extsb(dst->as_register(), src->as_register());
 532       break;
 533     }
 534     case Bytecodes::_i2c: {
 535       __ clrldi(dst->as_register(), src->as_register(), 64-16);
 536       break;
 537     }
 538     case Bytecodes::_i2s: {
 539       __ extsh(dst->as_register(), src->as_register());
 540       break;
 541     }
 542     case Bytecodes::_i2d:{
 543       FloatRegister rdst = dst->as_double_reg();
 544       // move src to dst register
 545       __ mtfprwa(rdst, src->as_register());
 546       __ fcfid(rdst, rdst);
 547       break;
 548     }
 549     case Bytecodes::_l2d: {
 550       FloatRegister rdst = dst->as_double_reg();
 551       // move src to dst register
 552       __ mtfprd(rdst, src->as_register_lo());
 553       __ fcfid(rdst, rdst);
 554       break;
 555     }
 556     case Bytecodes::_i2f:{
 557       FloatRegister rdst = dst->as_float_reg();
 558       // move src to dst register
 559       __ mtfprwa(rdst, src->as_register());
 560       __ fcfids(rdst, rdst);
 561       break;
 562     }
 563     case Bytecodes::_l2f: {
 564       FloatRegister rdst = dst->as_float_reg();
 565       // move src to dst register
 566       __ mtfprd(rdst, src->as_register_lo());
 567       __ fcfids(rdst, rdst);
 568       break;
 569     }
 570     case Bytecodes::_f2d: {
 571       __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
 572       break;
 573     }
 574     case Bytecodes::_d2f: {
 575       __ frsp(dst->as_float_reg(), src->as_double_reg());
 576       break;
 577     }
 578     case Bytecodes::_d2i:
 579     case Bytecodes::_f2i: {
 580       FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
 581       Label L;
 582       // Result must be 0 if value is NaN; test by comparing value to itself.
 583       __ fcmpu(CR0, rsrc, rsrc);
 584       __ li(dst->as_register(), 0);
 585       __ bso(CR0, L);
 586       __ fctiwz(rsrc, rsrc); // USE_KILL
 587       __ mffprd(dst->as_register(), rsrc);
 588       __ bind(L);
 589       break;
 590     }
 591     case Bytecodes::_d2l:
 592     case Bytecodes::_f2l: {
 593       FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
 594       Label L;
 595       // Result must be 0 if value is NaN; test by comparing value to itself.
 596       __ fcmpu(CR0, rsrc, rsrc);
 597       __ li(dst->as_register_lo(), 0);
 598       __ bso(CR0, L);
 599       __ fctidz(rsrc, rsrc); // USE_KILL
 600       __ mffprd(dst->as_register_lo(), rsrc);
 601       __ bind(L);
 602       break;
 603     }
 604 
 605     default: ShouldNotReachHere();
 606   }
 607 }
 608 
 609 
 610 void LIR_Assembler::align_call(LIR_Code) {
  // Do nothing; all instructions are word aligned on PPC.
 612 }
 613 
 614 
 615 bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
 616   int start_offset = __ offset();
 617   // Put the entry point as a constant into the constant pool.
 618   const address entry_point_toc_addr   = __ address_constant(target, RelocationHolder::none);
 619   if (entry_point_toc_addr == nullptr) {
 620     bailout("const section overflow");
 621     return false;
 622   }
 623   const int     entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 624 
  // Emit the trampoline stub which is associated with the branch-and-link below.
 626   address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
 627   if (!stub) {
 628     bailout("no space for trampoline stub");
 629     return false;
 630   }
 631   return true;
 632 }
 633 
 634 
 635 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
 636   assert(rtype==relocInfo::opt_virtual_call_type || rtype==relocInfo::static_call_type, "unexpected rtype");
 637 
 638   bool success = emit_trampoline_stub_for_call(op->addr());
 639   if (!success) { return; }
 640 
 641   __ relocate(rtype);
 642   // Note: At this point we do not have the address of the trampoline
 643   // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as a dummy, and the bl will be patched later.
 645   __ code()->set_insts_mark();
 646   __ bl(__ pc());
 647   add_call_info(code_offset(), op->info());
 648   __ post_call_nop();
 649 }
 650 
 651 
 652 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
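  // Load the address of this method's TOC into R2 so that the inline cache
  // and, via the trampoline stub below, the call target can be pulled out of
  // the constant pool.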
 653   __ calculate_address_from_global_toc(R2_TOC, __ method_toc());
 654 
 655   // Virtual call relocation will point to ic load.
 656   address virtual_call_meta_addr = __ pc();
 657   // Load a clear inline cache.
 658   AddressLiteral empty_ic((address) Universe::non_oop_word());
 659   bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
 660   if (!success) {
 661     bailout("const section overflow");
 662     return;
 663   }
 664   // Call to fixup routine. Fixup routine uses ScopeDesc info
 665   // to determine who we intended to call.
 666   __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
 667 
 668   success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
 669   if (!success) { return; }
 670 
 671   // Note: At this point we do not have the address of the trampoline
 672   // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as a dummy, and the bl will be patched later.
 674   __ bl(__ pc());
 675   add_call_info(code_offset(), op->info());
 676   __ post_call_nop();
 677 }
 678 
 679 void LIR_Assembler::explicit_null_check(Register addr, CodeEmitInfo* info) {
 680   ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(code_offset(), info);
 681   __ null_check(addr, stub->entry());
 682   append_code_stub(stub);
 683 }
 684 
 685 
 686 // Attention: caller must encode oop if needed
 687 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide) {
 688   int store_offset;
 689   if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset in R0.
 691     assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
 692     __ load_const_optimized(R0, offset);
 693     store_offset = store(from_reg, base, R0, type, wide);
 694   } else {
 695     store_offset = code_offset();
 696     switch (type) {
 697       case T_BOOLEAN: // fall through
 698       case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
 699       case T_CHAR  :
 700       case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
 701       case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
 702       case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
 703       case T_ADDRESS:
 704       case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
 705       case T_ARRAY : // fall through
 706       case T_OBJECT:
 707         {
 708           if (UseCompressedOops && !wide) {
 709             // Encoding done in caller
 710             __ stw(from_reg->as_register(), offset, base);
 711             __ verify_coop(from_reg->as_register(), FILE_AND_LINE);
 712           } else {
 713             __ std(from_reg->as_register(), offset, base);
 714             if (VerifyOops) {
 715               BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 716               bs->check_oop(_masm, from_reg->as_register(), FILE_AND_LINE); // kills R0
 717             }
 718           }
 719           break;
 720         }
 721       case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
 722       case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
 723       default      : ShouldNotReachHere();
 724     }
 725   }
 726   return store_offset;
 727 }
 728 
 729 
 730 // Attention: caller must encode oop if needed
 731 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
 732   int store_offset = code_offset();
 733   switch (type) {
 734     case T_BOOLEAN: // fall through
 735     case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
 736     case T_CHAR  :
 737     case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
 738     case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
 739     case T_LONG  :
 740 #ifdef _LP64
 741       __ stdx(from_reg->as_register_lo(), base, disp);
 742 #else
 743       Unimplemented();
 744 #endif
 745       break;
 746     case T_ADDRESS:
 747       __ stdx(from_reg->as_register(), base, disp);
 748       break;
 749     case T_ARRAY : // fall through
 750     case T_OBJECT:
 751       {
 752         if (UseCompressedOops && !wide) {
 753           // Encoding done in caller.
 754           __ stwx(from_reg->as_register(), base, disp);
 755           __ verify_coop(from_reg->as_register(), FILE_AND_LINE); // kills R0
 756         } else {
 757           __ stdx(from_reg->as_register(), base, disp);
 758           if (VerifyOops) {
 759             BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 760             bs->check_oop(_masm, from_reg->as_register(), FILE_AND_LINE); // kills R0
 761           }
 762         }
 763         break;
 764       }
 765     case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
 766     case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
 767     default      : ShouldNotReachHere();
 768   }
 769   return store_offset;
 770 }
 771 
 772 
 773 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide) {
 774   int load_offset;
 775   if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset in R0.
 777     __ load_const_optimized(R0, offset);
 778     load_offset = load(base, R0, to_reg, type, wide);
 779   } else {
 780     load_offset = code_offset();
 781     switch(type) {
 782       case T_BOOLEAN: // fall through
 783       case T_BYTE  :   __ lbz(to_reg->as_register(), offset, base);
 784                        __ extsb(to_reg->as_register(), to_reg->as_register()); break;
 785       case T_CHAR  :   __ lhz(to_reg->as_register(), offset, base); break;
 786       case T_SHORT :   __ lha(to_reg->as_register(), offset, base); break;
 787       case T_INT   :   __ lwa(to_reg->as_register(), offset, base); break;
 788       case T_LONG  :   __ ld(to_reg->as_register_lo(), offset, base); break;
 789       case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
 790       case T_ADDRESS:
 791         __ ld(to_reg->as_register(), offset, base);
 792         break;
 793       case T_ARRAY : // fall through
 794       case T_OBJECT:
 795         {
 796           if (UseCompressedOops && !wide) {
 797             __ lwz(to_reg->as_register(), offset, base);
 798             __ decode_heap_oop(to_reg->as_register());
 799           } else {
 800             __ ld(to_reg->as_register(), offset, base);
 801           }
 802           break;
 803         }
 804       case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
 805       case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
 806       default      : ShouldNotReachHere();
 807     }
 808   }
 809   return load_offset;
 810 }
 811 
 812 
 813 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
 814   int load_offset = code_offset();
 815   switch(type) {
 816     case T_BOOLEAN: // fall through
 817     case T_BYTE  :  __ lbzx(to_reg->as_register(), base, disp);
 818                     __ extsb(to_reg->as_register(), to_reg->as_register()); break;
 819     case T_CHAR  :  __ lhzx(to_reg->as_register(), base, disp); break;
 820     case T_SHORT :  __ lhax(to_reg->as_register(), base, disp); break;
 821     case T_INT   :  __ lwax(to_reg->as_register(), base, disp); break;
 822     case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
 823     case T_ARRAY : // fall through
 824     case T_OBJECT:
 825       {
 826         if (UseCompressedOops && !wide) {
 827           __ lwzx(to_reg->as_register(), base, disp);
 828           __ decode_heap_oop(to_reg->as_register());
 829         } else {
 830           __ ldx(to_reg->as_register(), base, disp);
 831         }
 832         break;
 833       }
 834     case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
 835     case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
 836     case T_LONG  :
 837 #ifdef _LP64
 838       __ ldx(to_reg->as_register_lo(), base, disp);
 839 #else
 840       Unimplemented();
 841 #endif
 842       break;
 843     default      : ShouldNotReachHere();
 844   }
 845   return load_offset;
 846 }
 847 
 848 
 849 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 850   LIR_Const* c = src->as_constant_ptr();
 851   Register src_reg = R0;
 852   switch (c->type()) {
 853     case T_INT:
 854     case T_FLOAT: {
 855       int value = c->as_jint_bits();
 856       __ load_const_optimized(src_reg, value);
 857       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
 858       __ stw(src_reg, addr);
 859       break;
 860     }
 861     case T_ADDRESS: {
 862       int value = c->as_jint_bits();
 863       __ load_const_optimized(src_reg, value);
 864       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
 865       __ std(src_reg, addr);
 866       break;
 867     }
 868     case T_OBJECT: {
 869       jobject2reg(c->as_jobject(), src_reg);
 870       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
 871       __ std(src_reg, addr);
 872       break;
 873     }
 874     case T_LONG:
 875     case T_DOUBLE: {
 876       int value = c->as_jlong_bits();
 877       __ load_const_optimized(src_reg, value);
 878       Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
 879       __ std(src_reg, addr);
 880       break;
 881     }
 882     default:
 883       Unimplemented();
 884   }
 885 }
 886 
 887 
 888 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 889   LIR_Const* c = src->as_constant_ptr();
 890   LIR_Address* addr = dest->as_address_ptr();
 891   Register base = addr->base()->as_pointer_register();
 892   LIR_Opr tmp = LIR_OprFact::illegalOpr;
 893   int offset = -1;
 894   // Null check for large offsets in LIRGenerator::do_StoreField.
 895   bool needs_explicit_null_check = !ImplicitNullChecks;
 896 
 897   if (info != nullptr && needs_explicit_null_check) {
 898     explicit_null_check(base, info);
 899   }
 900 
 901   switch (c->type()) {
 902     case T_FLOAT: type = T_INT;
 903     case T_INT:
 904     case T_ADDRESS: {
 905       tmp = FrameMap::R0_opr;
 906       __ load_const_optimized(tmp->as_register(), c->as_jint_bits());
 907       break;
 908     }
 909     case T_DOUBLE: type = T_LONG;
 910     case T_LONG: {
 911       tmp = FrameMap::R0_long_opr;
 912       __ load_const_optimized(tmp->as_register_lo(), c->as_jlong_bits());
 913       break;
 914     }
 915     case T_OBJECT: {
 916       tmp = FrameMap::R0_opr;
 917       if (UseCompressedOops && !wide && c->as_jobject() != nullptr) {
 918         AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
 919         // Don't care about sign extend (will use stw).
 920         __ lis(R0, 0); // Will get patched.
 921         __ relocate(oop_addr.rspec(), /*compressed format*/ 1);
 922         __ ori(R0, R0, 0); // Will get patched.
 923       } else {
 924         jobject2reg(c->as_jobject(), R0);
 925       }
 926       break;
 927     }
 928     default:
 929       Unimplemented();
 930   }
 931 
 932   // Handle either reg+reg or reg+disp address.
 933   if (addr->index()->is_valid()) {
 934     assert(addr->disp() == 0, "must be zero");
 935     offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
 936   } else {
 937     assert(Assembler::is_simm16(addr->disp()), "can't handle larger addresses");
 938     offset = store(tmp, base, addr->disp(), type, wide);
 939   }
 940 
 941   if (info != nullptr) {
 942     assert(offset != -1, "offset should've been set");
 943     if (!needs_explicit_null_check) {
 944       add_debug_info_for_null_check(offset, info);
 945     }
 946   }
 947 }
 948 
 949 
 950 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
 951   LIR_Const* c = src->as_constant_ptr();
 952   LIR_Opr to_reg = dest;
 953 
 954   switch (c->type()) {
 955     case T_INT: {
 956       assert(patch_code == lir_patch_none, "no patching handled here");
 957       __ load_const_optimized(dest->as_register(), c->as_jint(), R0);
 958       break;
 959     }
 960     case T_ADDRESS: {
 961       assert(patch_code == lir_patch_none, "no patching handled here");
 962       __ load_const_optimized(dest->as_register(), c->as_jint(), R0);  // Yes, as_jint ...
 963       break;
 964     }
 965     case T_LONG: {
 966       assert(patch_code == lir_patch_none, "no patching handled here");
 967       __ load_const_optimized(dest->as_register_lo(), c->as_jlong(), R0);
 968       break;
 969     }
 970 
 971     case T_OBJECT: {
 972       if (patch_code == lir_patch_none) {
 973         jobject2reg(c->as_jobject(), to_reg->as_register());
 974       } else {
 975         jobject2reg_with_patching(to_reg->as_register(), info);
 976       }
 977       break;
 978     }
 979 
 980     case T_METADATA:
 981       {
 982         if (patch_code == lir_patch_none) {
 983           metadata2reg(c->as_metadata(), to_reg->as_register());
 984         } else {
 985           klass2reg_with_patching(to_reg->as_register(), info);
 986         }
 987       }
 988       break;
 989 
 990     case T_FLOAT:
 991       {
 992         if (to_reg->is_single_fpu()) {
 993           address const_addr = __ float_constant(c->as_jfloat());
 994           if (const_addr == nullptr) {
 995             bailout("const section overflow");
 996             break;
 997           }
 998           RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
 999           __ relocate(rspec);
1000           __ load_const(R0, const_addr);
1001           __ lfsx(to_reg->as_float_reg(), R0);
1002         } else {
1003           assert(to_reg->is_single_cpu(), "Must be a cpu register.");
1004           __ load_const_optimized(to_reg->as_register(), jint_cast(c->as_jfloat()), R0);
1005         }
1006       }
1007       break;
1008 
1009     case T_DOUBLE:
1010       {
1011         if (to_reg->is_double_fpu()) {
1012           address const_addr = __ double_constant(c->as_jdouble());
1013           if (const_addr == nullptr) {
1014             bailout("const section overflow");
1015             break;
1016           }
1017           RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1018           __ relocate(rspec);
1019           __ load_const(R0, const_addr);
1020           __ lfdx(to_reg->as_double_reg(), R0);
1021         } else {
1022           assert(to_reg->is_double_cpu(), "Must be a long register.");
1023           __ load_const_optimized(to_reg->as_register_lo(), jlong_cast(c->as_jdouble()), R0);
1024         }
1025       }
1026       break;
1027 
1028     default:
1029       ShouldNotReachHere();
1030   }
1031 }
1032 
1033 
1034 Address LIR_Assembler::as_Address(LIR_Address* addr) {
1035   Unimplemented(); return Address();
1036 }
1037 
1038 
1039 inline RegisterOrConstant index_or_disp(LIR_Address* addr) {
1040   if (addr->index()->is_illegal()) {
1041     return (RegisterOrConstant)(addr->disp());
1042   } else {
1043     return (RegisterOrConstant)(addr->index()->as_pointer_register());
1044   }
1045 }
1046 
1047 
1048 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1049   const Register tmp = R0;
1050   switch (type) {
1051     case T_INT:
1052     case T_FLOAT: {
1053       Address from = frame_map()->address_for_slot(src->single_stack_ix());
1054       Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
1055       __ lwz(tmp, from);
1056       __ stw(tmp, to);
1057       break;
1058     }
1059     case T_ADDRESS:
1060     case T_OBJECT: {
1061       Address from = frame_map()->address_for_slot(src->single_stack_ix());
1062       Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
1063       __ ld(tmp, from);
1064       __ std(tmp, to);
1065       break;
1066     }
1067     case T_LONG:
1068     case T_DOUBLE: {
1069       Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
1070       Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
1071       __ ld(tmp, from);
1072       __ std(tmp, to);
1073       break;
1074     }
1075 
1076     default:
1077       ShouldNotReachHere();
1078   }
1079 }
1080 
1081 
1082 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
1083   Unimplemented(); return Address();
1084 }
1085 
1086 
1087 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
1088   Unimplemented(); return Address();
1089 }
1090 
1091 
1092 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
1093                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
1094 
1095   assert(type != T_METADATA, "load of metadata ptr not supported");
1096   LIR_Address* addr = src_opr->as_address_ptr();
1097   LIR_Opr to_reg = dest;
1098 
1099   Register src = addr->base()->as_pointer_register();
1100   Register disp_reg = noreg;
1101   int disp_value = addr->disp();
1102   bool needs_patching = (patch_code != lir_patch_none);
  // Null check for large offsets in LIRGenerator::do_LoadField.
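  // (A load can rely on the implicit check only if reads of the zero page
  // actually fault, hence the additional os::zero_page_read_protected() test.)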
1104   bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;
1105 
1106   if (info != nullptr && needs_explicit_null_check) {
1107     explicit_null_check(src, info);
1108   }
1109 
1110   if (addr->base()->type() == T_OBJECT) {
1111     __ verify_oop(src, FILE_AND_LINE);
1112   }
1113 
1114   PatchingStub* patch = nullptr;
1115   if (needs_patching) {
1116     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1117     assert(!to_reg->is_double_cpu() ||
1118            patch_code == lir_patch_none ||
1119            patch_code == lir_patch_normal, "patching doesn't match register");
1120   }
1121 
1122   if (addr->index()->is_illegal()) {
1123     if (!Assembler::is_simm16(disp_value)) {
1124       if (needs_patching) {
1125         __ load_const32(R0, 0); // patchable int
1126       } else {
1127         __ load_const_optimized(R0, disp_value);
1128       }
1129       disp_reg = R0;
1130     }
1131   } else {
1132     disp_reg = addr->index()->as_pointer_register();
1133     assert(disp_value == 0, "can't handle 3 operand addresses");
1134   }
1135 
1136   // Remember the offset of the load. The patching_epilog must be done
1137   // before the call to add_debug_info, otherwise the PcDescs don't get
1138   // entered in increasing order.
1139   int offset;
1140 
1141   if (disp_reg == noreg) {
1142     assert(Assembler::is_simm16(disp_value), "should have set this up");
1143     offset = load(src, disp_value, to_reg, type, wide);
1144   } else {
1145     offset = load(src, disp_reg, to_reg, type, wide);
1146   }
1147 
1148   if (patch != nullptr) {
1149     patching_epilog(patch, patch_code, src, info);
1150   }
1151   if (info != nullptr && !needs_explicit_null_check) {
1152     add_debug_info_for_null_check(offset, info);
1153   }
1154 }
1155 
1156 
1157 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1158   Address addr;
1159   if (src->is_single_word()) {
1160     addr = frame_map()->address_for_slot(src->single_stack_ix());
1161   } else if (src->is_double_word())  {
1162     addr = frame_map()->address_for_double_slot(src->double_stack_ix());
1163   }
1164 
1165   load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/);
1166 }
1167 
1168 
1169 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type) {
1170   Address addr;
1171   if (dest->is_single_word()) {
1172     addr = frame_map()->address_for_slot(dest->single_stack_ix());
1173   } else if (dest->is_double_word())  {
1174     addr = frame_map()->address_for_slot(dest->double_stack_ix());
1175   }
1176 
1177   store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/);
1178 }
1179 
1180 
1181 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
1182   if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
1183     if (from_reg->is_double_fpu()) {
1184       // double to double moves
1185       assert(to_reg->is_double_fpu(), "should match");
1186       __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
1187     } else {
1188       // float to float moves
1189       assert(to_reg->is_single_fpu(), "should match");
1190       __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
1191     }
1192   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1193     if (from_reg->is_double_cpu()) {
1194       __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
1195     } else if (to_reg->is_double_cpu()) {
1196       // int to int moves
1197       __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
1198     } else {
1199       // int to int moves
1200       __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
1201     }
1202   } else {
1203     ShouldNotReachHere();
1204   }
1205   if (is_reference_type(to_reg->type())) {
1206     __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
1207   }
1208 }
1209 
1210 
1211 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
1212                             LIR_PatchCode patch_code, CodeEmitInfo* info,
1213                             bool wide) {
1214   assert(type != T_METADATA, "store of metadata ptr not supported");
1215   LIR_Address* addr = dest->as_address_ptr();
1216 
1217   Register src = addr->base()->as_pointer_register();
1218   Register disp_reg = noreg;
1219   int disp_value = addr->disp();
1220   bool needs_patching = (patch_code != lir_patch_none);
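  // With unscaled narrow oops (base == 0, shift == 0) the low 32 bits of the
  // oop already are the narrow value, so no explicit encode step is needed
  // before the stw done in store().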
1221   bool compress_oop = (is_reference_type(type)) && UseCompressedOops && !wide &&
1222                       CompressedOops::mode() != CompressedOops::UnscaledNarrowOop;
1223   bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
1224   bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
1225   // Null check for large offsets in LIRGenerator::do_StoreField.
1226   bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;
1227 
1228   if (info != nullptr && needs_explicit_null_check) {
1229     explicit_null_check(src, info);
1230   }
1231 
1232   if (addr->base()->is_oop_register()) {
1233     __ verify_oop(src, FILE_AND_LINE);
1234   }
1235 
1236   PatchingStub* patch = nullptr;
1237   if (needs_patching) {
1238     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1239     assert(!from_reg->is_double_cpu() ||
1240            patch_code == lir_patch_none ||
1241            patch_code == lir_patch_normal, "patching doesn't match register");
1242   }
1243 
1244   if (addr->index()->is_illegal()) {
1245     if (load_disp) {
1246       disp_reg = use_R29 ? R29_TOC : R0;
1247       if (needs_patching) {
1248         __ load_const32(disp_reg, 0); // patchable int
1249       } else {
1250         __ load_const_optimized(disp_reg, disp_value);
1251       }
1252     }
1253   } else {
1254     disp_reg = addr->index()->as_pointer_register();
1255     assert(disp_value == 0, "can't handle 3 operand addresses");
1256   }
1257 
  // Remember the offset of the store. The patching_epilog must be done
1259   // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
1260   // entered in increasing order.
1261   int offset;
1262 
1263   if (compress_oop) {
1264     Register co = __ encode_heap_oop(R0, from_reg->as_register());
1265     from_reg = FrameMap::as_opr(co);
1266   }
1267 
1268   if (disp_reg == noreg) {
1269     assert(Assembler::is_simm16(disp_value), "should have set this up");
1270     offset = store(from_reg, src, disp_value, type, wide);
1271   } else {
1272     offset = store(from_reg, src, disp_reg, type, wide);
1273   }
1274 
1275   if (use_R29) {
1276     __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
1277   }
1278 
1279   if (patch != nullptr) {
1280     patching_epilog(patch, patch_code, src, info);
1281   }
1282 
1283   if (info != nullptr && !needs_explicit_null_check) {
1284     add_debug_info_for_null_check(offset, info);
1285   }
1286 }
1287 
1288 
1289 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
1290   const Register return_pc = R31;  // Must survive C-call to enable_stack_reserved_zone().
1291   const Register temp      = R12;
1292 
1293   // Pop the stack before the safepoint code.
1294   int frame_size = initial_frame_size_in_bytes();
1295   if (Assembler::is_simm(frame_size, 16)) {
1296     __ addi(R1_SP, R1_SP, frame_size);
1297   } else {
1298     __ pop_frame();
1299   }
1300 
  // Restore the return pc relative to the caller's sp.
1302   __ ld(return_pc, _abi0(lr), R1_SP);
1303   // Move return pc to LR.
1304   __ mtlr(return_pc);
1305 
1306   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
1307     __ reserved_stack_check(return_pc);
1308   }
1309 
1310   // We need to mark the code position where the load from the safepoint
1311   // polling page was emitted as relocInfo::poll_return_type here.
1312   if (!UseSIGTRAP) {
1313     code_stub->set_safepoint_offset(__ offset());
1314     __ relocate(relocInfo::poll_return_type);
1315   }
1316   __ safepoint_poll(*code_stub->entry(), temp, true /* at_return */, true /* in_nmethod */);
1317 
1318   // Return.
1319   __ blr();
1320 }
1321 
1322 
1323 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
1324   const Register poll_addr = tmp->as_register();
1325   __ ld(poll_addr, in_bytes(JavaThread::polling_page_offset()), R16_thread);
1326   if (info != nullptr) {
1327     add_debug_info_for_branch(info);
1328   }
1329   int offset = __ offset();
1330   __ relocate(relocInfo::poll_type);
1331   __ load_from_polling_page(poll_addr);
1332 
1333   return offset;
1334 }
1335 
1336 
1337 void LIR_Assembler::emit_static_call_stub() {
1338   address call_pc = __ pc();
1339   address stub = __ start_a_stub(static_call_stub_size());
1340   if (stub == nullptr) {
1341     bailout("static call stub overflow");
1342     return;
1343   }
1344 
1345   // For java_to_interp stubs we use R11_scratch1 as scratch register
1346   // and in call trampoline stubs we use R12_scratch2. This way we
1347   // can distinguish them (see is_NativeCallTrampolineStub_at()).
1348   const Register reg_scratch = R11_scratch1;
1349 
1350   // Create a static stub relocation which relates this stub
1351   // with the call instruction at insts_call_instruction_offset in the
1352   // instructions code-section.
1353   int start = __ offset();
1354   __ relocate(static_stub_Relocation::spec(call_pc));
1355 
1356   // Now, create the stub's code:
1357   // - load the TOC
1358   // - load the inline cache oop from the constant pool
1359   // - load the call target from the constant pool
1360   // - call
1361   __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
1362   AddressLiteral ic = __ allocate_metadata_address((Metadata *)nullptr);
1363   bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);
1364 
1365   if (ReoptimizeCallSequences) {
1366     __ b64_patchable((address)-1, relocInfo::none);
1367   } else {
1368     AddressLiteral a((address)-1);
1369     success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
1370     __ mtctr(reg_scratch);
1371     __ bctr();
1372   }
1373   if (!success) {
1374     bailout("const section overflow");
1375     return;
1376   }
1377 
1378   assert(__ offset() - start <= static_call_stub_size(), "stub too big");
1379   __ end_a_stub();
1380 }
1381 
1382 
1383 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1384   bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
1385   if (opr1->is_single_fpu()) {
1386     __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
1387   } else if (opr1->is_double_fpu()) {
1388     __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
1389   } else if (opr1->is_single_cpu()) {
1390     if (opr2->is_constant()) {
1391       switch (opr2->as_constant_ptr()->type()) {
1392         case T_INT:
1393           {
1394             jint con = opr2->as_constant_ptr()->as_jint();
1395             if (unsigned_comp) {
1396               if (Assembler::is_uimm(con, 16)) {
1397                 __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
1398               } else {
1399                 __ load_const_optimized(R0, con);
1400                 __ cmplw(BOOL_RESULT, opr1->as_register(), R0);
1401               }
1402             } else {
1403               if (Assembler::is_simm(con, 16)) {
1404                 __ cmpwi(BOOL_RESULT, opr1->as_register(), con);
1405               } else {
1406                 __ load_const_optimized(R0, con);
1407                 __ cmpw(BOOL_RESULT, opr1->as_register(), R0);
1408               }
1409             }
1410           }
1411           break;
1412 
1413         case T_OBJECT:
1414           // There are only equal/notequal comparisons on objects.
1415           {
1416             assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
1417             jobject con = opr2->as_constant_ptr()->as_jobject();
1418             if (con == nullptr) {
1419               __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
1420             } else {
1421               jobject2reg(con, R0);
1422               __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
1423             }
1424           }
1425           break;
1426 
1427         case T_METADATA:
1428           // We only need, for now, comparison with null for metadata.
1429           {
1430             assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
1431             Metadata* p = opr2->as_constant_ptr()->as_metadata();
1432             if (p == nullptr) {
1433               __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
1434             } else {
1435               ShouldNotReachHere();
1436             }
1437           }
1438           break;
1439 
1440         default:
1441           ShouldNotReachHere();
1442           break;
1443       }
1444     } else {
1445       assert(opr1->type() != T_ADDRESS && opr2->type() != T_ADDRESS, "currently unsupported");
1446       if (is_reference_type(opr1->type())) {
1447         // There are only equal/notequal comparisons on objects.
1448         assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
1449         __ cmpd(BOOL_RESULT, opr1->as_register(), opr2->as_register());
1450       } else {
1451         if (unsigned_comp) {
1452           __ cmplw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
1453         } else {
1454           __ cmpw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
1455         }
1456       }
1457     }
1458   } else if (opr1->is_double_cpu()) {
1459     if (opr2->is_constant()) {
1460       jlong con = opr2->as_constant_ptr()->as_jlong();
1461       if (unsigned_comp) {
1462         if (Assembler::is_uimm(con, 16)) {
1463           __ cmpldi(BOOL_RESULT, opr1->as_register_lo(), con);
1464         } else {
1465           __ load_const_optimized(R0, con);
1466           __ cmpld(BOOL_RESULT, opr1->as_register_lo(), R0);
1467         }
1468       } else {
1469         if (Assembler::is_simm(con, 16)) {
1470           __ cmpdi(BOOL_RESULT, opr1->as_register_lo(), con);
1471         } else {
1472           __ load_const_optimized(R0, con);
1473           __ cmpd(BOOL_RESULT, opr1->as_register_lo(), R0);
1474         }
1475       }
1476     } else if (opr2->is_register()) {
1477       if (unsigned_comp) {
1478         __ cmpld(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
1479       } else {
1480         __ cmpd(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
1481       }
1482     } else {
1483       ShouldNotReachHere();
1484     }
1485   } else {
1486     ShouldNotReachHere();
1487   }
1488 }
1489 
1490 
1491 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1492   const Register Rdst = dst->as_register();
1493   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1494     bool is_unordered_less = (code == lir_ucmp_fd2i);
1495     if (left->is_single_fpu()) {
1496       __ fcmpu(CR0, left->as_float_reg(), right->as_float_reg());
1497     } else if (left->is_double_fpu()) {
1498       __ fcmpu(CR0, left->as_double_reg(), right->as_double_reg());
1499     } else {
1500       ShouldNotReachHere();
1501     }
1502     __ set_cmpu3(Rdst, is_unordered_less); // is_unordered_less ? -1 : 1
1503   } else if (code == lir_cmp_l2i) {
1504     __ cmpd(CR0, left->as_register_lo(), right->as_register_lo());
1505     __ set_cmp3(Rdst);  // set result as follows: <: -1, =: 0, >: 1
1506   } else {
1507     ShouldNotReachHere();
1508   }
1509 }
1510 
1511 
1512 inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
1513   if (src->is_constant()) {
1514     lasm->const2reg(src, dst, lir_patch_none, nullptr);
1515   } else if (src->is_register()) {
1516     lasm->reg2reg(src, dst);
1517   } else if (src->is_stack()) {
1518     lasm->stack2reg(src, dst, dst->type());
1519   } else {
1520     ShouldNotReachHere();
1521   }
1522 }
1523 
1524 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
1525                           LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
1526   assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on ppc");
1527 
1528   if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
1529     load_to_reg(this, opr1, result); // Condition doesn't matter.
1530     return;
1531   }
1532 
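       // Map the LIR condition onto one of the three CR bits (lt, gt, eq). Conditions
       // without a direct bit (notEqual, lessEqual/belowEqual, greaterEqual/aboveEqual)
       // test the complementary bit and invert the sense via 'positive'.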
1533   bool positive = false;
1534   Assembler::Condition cond = Assembler::equal;
1535   switch (condition) {
1536     case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; break;
1537     case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; break;
1538     case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
1539     case lir_cond_belowEqual:
1540     case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
1541     case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
1542     case lir_cond_aboveEqual:
1543     case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
1544     default:                    ShouldNotReachHere();
1545   }
1546 
1547   if (result->is_cpu_register()) {
1548     bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register();
1549     const Register result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();
1550 
1551     // We can use result_reg to load one operand if not already in register.
1552     Register first  = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
1553              second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;
1554 
1555     if (first != second) {
1556       if (!o1_is_reg) {
1557         load_to_reg(this, opr1, result);
1558       }
1559 
1560       if (!o2_is_reg) {
1561         load_to_reg(this, opr2, result);
1562       }
1563 
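           // isel picks 'first' if the (possibly inverted) condition bit is set, otherwise
           // 'second'; passing !positive as the inversion flag handles the conditions that
           // were mapped onto their complementary CR bit above.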
1564       __ isel(result_reg, BOOL_RESULT, cond, !positive, first, second);
1565       return;
1566     }
1567   } // isel
1568 
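       // Branch-based fallback (also taken when both operands would have to go through the
       // result register): load opr1, then conditionally skip loading opr2.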
1569   load_to_reg(this, opr1, result);
1570 
1571   Label skip;
1572   int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
1573   int bi = Assembler::bi0(BOOL_RESULT, cond);
1574   __ bc(bo, bi, skip);
1575 
1576   load_to_reg(this, opr2, result);
1577   __ bind(skip);
1578 }
1579 
1580 
1581 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
1582                              CodeEmitInfo* info) {
1583   assert(info == nullptr, "unused on this code path");
1584   assert(left->is_register(), "wrong items state");
1585   assert(dest->is_register(), "wrong items state");
1586 
1587   if (right->is_register()) {
1588     if (dest->is_float_kind()) {
1589 
1590       FloatRegister lreg, rreg, res;
1591       if (right->is_single_fpu()) {
1592         lreg = left->as_float_reg();
1593         rreg = right->as_float_reg();
1594         res  = dest->as_float_reg();
1595         switch (code) {
1596           case lir_add: __ fadds(res, lreg, rreg); break;
1597           case lir_sub: __ fsubs(res, lreg, rreg); break;
1598           case lir_mul: __ fmuls(res, lreg, rreg); break;
1599           case lir_div: __ fdivs(res, lreg, rreg); break;
1600           default: ShouldNotReachHere();
1601         }
1602       } else {
1603         lreg = left->as_double_reg();
1604         rreg = right->as_double_reg();
1605         res  = dest->as_double_reg();
1606         switch (code) {
1607           case lir_add: __ fadd(res, lreg, rreg); break;
1608           case lir_sub: __ fsub(res, lreg, rreg); break;
1609           case lir_mul: __ fmul(res, lreg, rreg); break;
1610           case lir_div: __ fdiv(res, lreg, rreg); break;
1611           default: ShouldNotReachHere();
1612         }
1613       }
1614 
1615     } else if (dest->is_double_cpu()) {
1616 
1617       Register dst_lo = dest->as_register_lo();
1618       Register op1_lo = left->as_pointer_register();
1619       Register op2_lo = right->as_pointer_register();
1620 
1621       switch (code) {
1622         case lir_add: __ add(dst_lo, op1_lo, op2_lo); break;
1623         case lir_sub: __ sub(dst_lo, op1_lo, op2_lo); break;
1624         case lir_mul: __ mulld(dst_lo, op1_lo, op2_lo); break;
1625         default: ShouldNotReachHere();
1626       }
1627     } else {
1628       assert (right->is_single_cpu(), "Just Checking");
1629 
1630       Register lreg = left->as_register();
1631       Register res  = dest->as_register();
1632       Register rreg = right->as_register();
1633       switch (code) {
1634         case lir_add:  __ add  (res, lreg, rreg); break;
1635         case lir_sub:  __ sub  (res, lreg, rreg); break;
1636         case lir_mul:  __ mullw(res, lreg, rreg); break;
1637         default: ShouldNotReachHere();
1638       }
1639     }
1640   } else {
1641     assert (right->is_constant(), "must be constant");
1642 
1643     if (dest->is_single_cpu()) {
1644       Register lreg = left->as_register();
1645       Register res  = dest->as_register();
1646       int    simm16 = right->as_constant_ptr()->as_jint();
1647 
1648       switch (code) {
1649         case lir_sub:  assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
1650                        simm16 = -simm16;
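                            // Fall through to the lir_add case.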
1651         case lir_add:  if (res == lreg && simm16 == 0) break;
1652                        __ addi(res, lreg, simm16); break;
1653         case lir_mul:  if (res == lreg && simm16 == 1) break;
1654                        __ mulli(res, lreg, simm16); break;
1655         default: ShouldNotReachHere();
1656       }
1657     } else {
1658       Register lreg = left->as_pointer_register();
1659       Register res  = dest->as_register_lo();
1660       long con = right->as_constant_ptr()->as_jlong();
1661       assert(Assembler::is_simm16(con), "must be simm16");
1662 
1663       switch (code) {
1664         case lir_sub:  assert(Assembler::is_simm16(-con), "cannot encode");  // see do_ArithmeticOp_Long
1665                        con = -con;
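                            // Fall through to the lir_add case.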
1666         case lir_add:  if (res == lreg && con == 0) break;
1667                        __ addi(res, lreg, (int)con); break;
1668         case lir_mul:  if (res == lreg && con == 1) break;
1669                        __ mulli(res, lreg, (int)con); break;
1670         default: ShouldNotReachHere();
1671       }
1672     }
1673   }
1674 }
1675 
1676 
1677 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1678   switch (code) {
1679     case lir_sqrt: {
1680       __ fsqrt(dest->as_double_reg(), value->as_double_reg());
1681       break;
1682     }
1683     case lir_abs: {
1684       __ fabs(dest->as_double_reg(), value->as_double_reg());
1685       break;
1686     }
1687     case lir_f2hf: {
1688       __ f2hf(dest->as_register(), value->as_float_reg(), tmp->as_float_reg());
1689       break;
1690     }
1691     case lir_hf2f: {
1692       __ hf2f(dest->as_float_reg(), value->as_register());
1693       break;
1694     }
1695     default: {
1696       ShouldNotReachHere();
1697       break;
1698     }
1699   }
1700 }
1701 
1702 
1703 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
1704   if (right->is_constant()) { // see do_LogicOp
1705     long uimm;
1706     Register d, l;
1707     if (dest->is_single_cpu()) {
1708       uimm = right->as_constant_ptr()->as_jint();
1709       d = dest->as_register();
1710       l = left->as_register();
1711     } else {
1712       uimm = right->as_constant_ptr()->as_jlong();
1713       d = dest->as_register_lo();
1714       l = left->as_register_lo();
1715     }
1716     long uimms  = (unsigned long)uimm >> 16,
1717          uimmss = (unsigned long)uimm >> 32;
1718 
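         // Pick the immediate encoding that fits: the *i forms cover the low 16 bits, the
         // *is forms cover bits 16..31, and the andi() MacroAssembler helper handles the
         // remaining cases (wide or power-of-two masks).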
1719     switch (code) {
1720       case lir_logic_and:
1721         if (uimmss != 0 || (uimms != 0 && (uimm & 0xFFFF) != 0) || is_power_of_2(uimm)) {
1722           __ andi(d, l, uimm); // special cases
1723         } else if (uimms != 0) { __ andis_(d, l, uimms); }
1724         else { __ andi_(d, l, uimm); }
1725         break;
1726 
1727       case lir_logic_or:
1728         if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ oris(d, l, uimms); }
1729         else { __ ori(d, l, uimm); }
1730         break;
1731 
1732       case lir_logic_xor:
1733         if (uimm == -1) { __ nand(d, l, l); } // special case
1734         else if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ xoris(d, l, uimms); }
1735         else { __ xori(d, l, uimm); }
1736         break;
1737 
1738       default: ShouldNotReachHere();
1739     }
1740   } else {
1741     assert(right->is_register(), "right should be in register");
1742 
1743     if (dest->is_single_cpu()) {
1744       switch (code) {
1745         case lir_logic_and: __ andr(dest->as_register(), left->as_register(), right->as_register()); break;
1746         case lir_logic_or:  __ orr (dest->as_register(), left->as_register(), right->as_register()); break;
1747         case lir_logic_xor: __ xorr(dest->as_register(), left->as_register(), right->as_register()); break;
1748         default: ShouldNotReachHere();
1749       }
1750     } else {
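           // Operands may mix a single-cpu oop register with a long register pair;
           // pick the register that actually holds the value on each side.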
1751       Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
1752                                                                         left->as_register_lo();
1753       Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
1754                                                                           right->as_register_lo();
1755 
1756       switch (code) {
1757         case lir_logic_and: __ andr(dest->as_register_lo(), l, r); break;
1758         case lir_logic_or:  __ orr (dest->as_register_lo(), l, r); break;
1759         case lir_logic_xor: __ xorr(dest->as_register_lo(), l, r); break;
1760         default: ShouldNotReachHere();
1761       }
1762     }
1763   }
1764 }
1765 
1766 
1767 int LIR_Assembler::shift_amount(BasicType t) {
1768   int elem_size = type2aelembytes(t);
1769   switch (elem_size) {
1770     case 1 : return 0;
1771     case 2 : return 1;
1772     case 4 : return 2;
1773     case 8 : return 3;
1774   }
1775   ShouldNotReachHere();
1776   return -1;
1777 }
1778 
1779 
1780 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1781   info->add_register_oop(exceptionOop);
1782 
1783   // Reuse the debug info from the safepoint poll for the throw op itself.
1784   address pc_for_athrow = __ pc();
1785   int pc_for_athrow_offset = __ offset();
1786   //RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
1787   //__ relocate(rspec);
1788   //__ load_const(exceptionPC->as_register(), pc_for_athrow, R0);
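       // Compute pc_for_athrow relative to the global TOC (with a relocation entry) rather
       // than materializing the full 64-bit constant.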
1789   __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true);
1790   add_call_info(pc_for_athrow_offset, info); // for exception handler
1791 
1792   address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? StubId::c1_handle_exception_id
1793                                                                    : StubId::c1_handle_exception_nofpu_id);
1794   //__ load_const_optimized(R0, stub);
1795   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
1796   __ mtctr(R0);
1797   __ bctr();
1798 }
1799 
1800 
1801 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
1802   // Note: Not used with EnableDebuggingOnDemand.
1803   assert(exceptionOop->as_register() == R3, "should match");
1804   __ b(_unwind_handler_entry);
1805 }
1806 
1807 
1808 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
1809   Register src = op->src()->as_register();
1810   Register dst = op->dst()->as_register();
1811   Register src_pos = op->src_pos()->as_register();
1812   Register dst_pos = op->dst_pos()->as_register();
1813   Register length  = op->length()->as_register();
1814   Register tmp = op->tmp()->as_register();
1815   Register tmp2 = R0;
1816 
1817   int flags = op->flags();
1818   ciArrayKlass* default_type = op->expected_type();
1819   BasicType basic_type = (default_type != nullptr) ? default_type->element_type()->basic_type() : T_ILLEGAL;
1820   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
1821 
1822   // Set up the arraycopy stub information.
1823   ArrayCopyStub* stub = op->stub();
1824 
1825   // Always use the stub if no type information is available. It's OK if
1826   // the known type isn't loaded, since the code sanity-checks in debug mode
1827   // and the type isn't required when we know the exact type.
1828   // Also check that the type is an array type.
1829   if (default_type == nullptr) {
1830     assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
1831            length->is_nonvolatile(), "must preserve");
1832     address copyfunc_addr = StubRoutines::generic_arraycopy();
1833     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
1834 
1835     // 3 parms are int. Convert to long.
1836     __ mr(R3_ARG1, src);
1837     __ extsw(R4_ARG2, src_pos);
1838     __ mr(R5_ARG3, dst);
1839     __ extsw(R6_ARG4, dst_pos);
1840     __ extsw(R7_ARG5, length);
1841 
1842 #ifndef PRODUCT
1843     if (PrintC1Statistics) {
1844       address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
1845       int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
1846       __ lwz(R11_scratch1, simm16_offs, tmp);
1847       __ addi(R11_scratch1, R11_scratch1, 1);
1848       __ stw(R11_scratch1, simm16_offs, tmp);
1849     }
1850 #endif
1851     __ call_c(copyfunc_addr, relocInfo::runtime_call_type);
1852 
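         // R3_RET is 0 on success, otherwise -1 ^ K where K is the number of elements already
         // copied; adjust length and positions by K so the slow path can finish the copy.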
1853     __ nand(tmp, R3_RET, R3_RET);
1854     __ subf(length, tmp, length);
1855     __ add(src_pos, tmp, src_pos);
1856     __ add(dst_pos, tmp, dst_pos);
1857 
1858     __ cmpwi(CR0, R3_RET, 0);
1859     __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::less), *stub->entry());
1860     __ bind(*stub->continuation());
1861     return;
1862   }
1863 
1864   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
1865   Label cont, slow, copyfunc;
1866 
1867   bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check |
1868                                         LIR_OpArrayCopy::dst_null_check |
1869                                         LIR_OpArrayCopy::src_pos_positive_check |
1870                                         LIR_OpArrayCopy::dst_pos_positive_check |
1871                                         LIR_OpArrayCopy::length_positive_check);
1872 
1873   // Use only one conditional branch for simple checks.
1874   if (simple_check_flag_set) {
1875     ConditionRegister combined_check = CR1, tmp_check = CR1;
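         // combined_check.eq accumulates "some simple check failed"; a single beq below then
         // branches to the slow path.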
1876 
1877     // Make sure src and dst are non-null.
1878     if (flags & LIR_OpArrayCopy::src_null_check) {
1879       __ cmpdi(combined_check, src, 0);
1880       tmp_check = CR0;
1881     }
1882 
1883     if (flags & LIR_OpArrayCopy::dst_null_check) {
1884       __ cmpdi(tmp_check, dst, 0);
1885       if (tmp_check != combined_check) {
1886         __ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal);
1887       }
1888       tmp_check = CR0;
1889     }
1890 
1891     // Clear combined_check.eq if not already used.
1892     if (tmp_check == combined_check) {
1893       __ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal);
1894       tmp_check = CR0;
1895     }
1896 
1897     if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
1898       // Test src_pos register.
1899       __ cmpwi(tmp_check, src_pos, 0);
1900       __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
1901     }
1902 
1903     if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
1904       // Test dst_pos register.
1905       __ cmpwi(tmp_check, dst_pos, 0);
1906       __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
1907     }
1908 
1909     if (flags & LIR_OpArrayCopy::length_positive_check) {
1910       // Make sure length isn't negative.
1911       __ cmpwi(tmp_check, length, 0);
1912       __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
1913     }
1914 
1915     __ beq(combined_check, slow);
1916   }
1917 
1918   // If the compiler was not able to prove that the exact type of the source or the destination
1919   // of the arraycopy is an array type, check at runtime whether the source or the destination is
1920   // an instance type.
1921   if (flags & LIR_OpArrayCopy::type_check) {
1922     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
1923       __ load_klass(tmp, dst);
1924       __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
1925       __ cmpwi(CR0, tmp2, Klass::_lh_neutral_value);
1926       __ bge(CR0, slow);
1927     }
1928 
1929     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
1930       __ load_klass(tmp, src);
1931       __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
1932       __ cmpwi(CR0, tmp2, Klass::_lh_neutral_value);
1933       __ bge(CR0, slow);
1934     }
1935   }
1936 
1937   // The upper 32 bits must be zero.
1938   __ extsw(length, length);
1939 
1940   __ extsw(src_pos, src_pos);
1941   if (flags & LIR_OpArrayCopy::src_range_check) {
1942     __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
1943     __ add(tmp, length, src_pos);
1944     __ cmpld(CR0, tmp2, tmp);
1945     __ ble(CR0, slow);
1946   }
1947 
1948   __ extsw(dst_pos, dst_pos);
1949   if (flags & LIR_OpArrayCopy::dst_range_check) {
1950     __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
1951     __ add(tmp, length, dst_pos);
1952     __ cmpld(CR0, tmp2, tmp);
1953     __ ble(CR0, slow);
1954   }
1955 
1956   int shift = shift_amount(basic_type);
1957 
1958   if (!(flags & LIR_OpArrayCopy::type_check)) {
1959     if (stub != nullptr) {
1960       __ b(cont);
1961       __ bind(slow);
1962       __ b(*stub->entry());
1963     }
1964   } else {
1965     // We don't know whether the array types are compatible.
1966     if (basic_type != T_OBJECT) {
1967       // Simple test for basic type arrays.
1968       __ cmp_klasses_from_objects(CR0, src, dst, tmp, tmp2);
1969       __ beq(CR0, cont);
1970     } else {
1971       // For object arrays, if src is a sub class of dst then we can
1972       // safely do the copy.
1973       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
1974 
1975       const Register sub_klass = R5, super_klass = R4; // like CheckCast/InstanceOf
1976       assert_different_registers(tmp, tmp2, sub_klass, super_klass);
1977 
1978       __ load_klass(sub_klass, src);
1979       __ load_klass(super_klass, dst);
1980 
1981       __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
1982                                        &cont, copyfunc_addr != nullptr ? &copyfunc : &slow, nullptr);
1983 
1984       address slow_stc = Runtime1::entry_for(StubId::c1_slow_subtype_check_id);
1985       //__ load_const_optimized(tmp, slow_stc, tmp2);
1986       __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
1987       __ mtctr(tmp);
1988       __ bctrl(); // sets CR0
1989       __ beq(CR0, cont);
1990 
1991       if (copyfunc_addr != nullptr) { // Use stub if available.
1992         __ bind(copyfunc);
1993         // Src is not a sub class of dst so we have to do a
1994         // per-element check.
1995         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
1996         if ((flags & mask) != mask) {
1997           assert(flags & mask, "one of the two should be known to be an object array");
1998 
1999           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2000             __ load_klass(tmp, src);
2001           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2002             __ load_klass(tmp, dst);
2003           }
2004 
2005           __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
2006 
2007           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2008           __ load_const_optimized(tmp, objArray_lh);
2009           __ cmpw(CR0, tmp, tmp2);
2010           __ bne(CR0, slow);
2011         }
2012 
2013         Register src_ptr = R3_ARG1;
2014         Register dst_ptr = R4_ARG2;
2015         Register len     = R5_ARG3;
2016         Register chk_off = R6_ARG4;
2017         Register super_k = R7_ARG5;
2018 
2019         __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2020         __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2021         if (shift == 0) {
2022           __ add(src_ptr, src_pos, src_ptr);
2023           __ add(dst_ptr, dst_pos, dst_ptr);
2024         } else {
2025           __ sldi(tmp, src_pos, shift);
2026           __ sldi(tmp2, dst_pos, shift);
2027           __ add(src_ptr, tmp, src_ptr);
2028           __ add(dst_ptr, tmp2, dst_ptr);
2029         }
2030 
2031         __ load_klass(tmp, dst);
2032         __ mr(len, length);
2033 
2034         int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2035         __ ld(super_k, ek_offset, tmp);
2036 
2037         int sco_offset = in_bytes(Klass::super_check_offset_offset());
2038         __ lwz(chk_off, sco_offset, super_k);
2039 
2040         __ call_c(copyfunc_addr, relocInfo::runtime_call_type);
2041 
2042 #ifndef PRODUCT
2043         if (PrintC1Statistics) {
2044           Label failed;
2045           __ cmpwi(CR0, R3_RET, 0);
2046           __ bne(CR0, failed);
2047           address counter = (address)&Runtime1::_arraycopy_checkcast_cnt;
2048           int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
2049           __ lwz(R11_scratch1, simm16_offs, tmp);
2050           __ addi(R11_scratch1, R11_scratch1, 1);
2051           __ stw(R11_scratch1, simm16_offs, tmp);
2052           __ bind(failed);
2053         }
2054 #endif
2055 
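             // R3_RET is 0 on success, otherwise -1 ^ K where K is the number of elements
             // copied before the element type check failed; adjust length and positions by K
             // before taking the slow path.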
2056         __ nand(tmp, R3_RET, R3_RET);
2057         __ cmpwi(CR0, R3_RET, 0);
2058         __ beq(CR0, *stub->continuation());
2059 
2060 #ifndef PRODUCT
2061         if (PrintC1Statistics) {
2062           address counter = (address)&Runtime1::_arraycopy_checkcast_attempt_cnt;
2063           int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
2064           __ lwz(R11_scratch1, simm16_offs, tmp);
2065           __ addi(R11_scratch1, R11_scratch1, 1);
2066           __ stw(R11_scratch1, simm16_offs, tmp);
2067         }
2068 #endif
2069 
2070         __ subf(length, tmp, length);
2071         __ add(src_pos, tmp, src_pos);
2072         __ add(dst_pos, tmp, dst_pos);
2073       }
2074     }
2075     __ bind(slow);
2076     __ b(*stub->entry());
2077   }
2078   __ bind(cont);
2079 
2080 #ifdef ASSERT
2081   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2082     // Sanity check the known type with the incoming class. For the
2083     // primitive case the types must match exactly with src.klass and
2084     // dst.klass each exactly matching the default type. For the
2085     // object array case, if no type check is needed then either the
2086     // dst type is exactly the expected type and the src type is a
2087     // subtype which we can't check or src is the same array as dst
2088     // but not necessarily exactly of type default_type.
2089     Label known_ok, halt;
2090     metadata2reg(default_type->constant_encoding(), tmp);
2091     __ cmp_klass(CR0, dst, tmp, R11_scratch1, R12_scratch2);
2092     if (basic_type != T_OBJECT) {
2093       __ bne(CR0, halt);
2094       __ cmp_klass(CR0, src, tmp, R11_scratch1, R12_scratch2);
2095       __ beq(CR0, known_ok);
2096     } else {
2097       __ beq(CR0, known_ok);
2098       __ cmpd(CR0, src, dst);
2099       __ beq(CR0, known_ok);
2100     }
2101     __ bind(halt);
2102     __ stop("incorrect type information in arraycopy");
2103     __ bind(known_ok);
2104   }
2105 #endif
2106 
2107 #ifndef PRODUCT
2108   if (PrintC1Statistics) {
2109     address counter = Runtime1::arraycopy_count_address(basic_type);
2110     int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
2111     __ lwz(R11_scratch1, simm16_offs, tmp);
2112     __ addi(R11_scratch1, R11_scratch1, 1);
2113     __ stw(R11_scratch1, simm16_offs, tmp);
2114   }
2115 #endif
2116 
2117   Register src_ptr = R3_ARG1;
2118   Register dst_ptr = R4_ARG2;
2119   Register len     = R5_ARG3;
2120 
2121   __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2122   __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2123   if (shift == 0) {
2124     __ add(src_ptr, src_pos, src_ptr);
2125     __ add(dst_ptr, dst_pos, dst_ptr);
2126   } else {
2127     __ sldi(tmp, src_pos, shift);
2128     __ sldi(tmp2, dst_pos, shift);
2129     __ add(src_ptr, tmp, src_ptr);
2130     __ add(dst_ptr, tmp2, dst_ptr);
2131   }
2132 
2133   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2134   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2135   const char *name;
2136   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2137 
2138   // Arraycopy stubs take a length in number of elements, so don't scale it.
2139   __ mr(len, length);
2140   __ call_c(entry, relocInfo::runtime_call_type);
2141 
2142   if (stub != nullptr) {
2143     __ bind(*stub->continuation());
2144   }
2145 }
2146 
2147 
2148 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2149   if (dest->is_single_cpu()) {
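         // Java semantics: an int shift uses only the low 5 bits of the count.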
2150     __ rldicl(tmp->as_register(), count->as_register(), 0, 64-5);
2151 #ifdef _LP64
2152     if (left->type() == T_OBJECT) {
2153       switch (code) {
2154         case lir_shl:  __ sld(dest->as_register(), left->as_register(), tmp->as_register()); break;
2155         case lir_shr:  __ srad(dest->as_register(), left->as_register(), tmp->as_register()); break;
2156         case lir_ushr: __ srd(dest->as_register(), left->as_register(), tmp->as_register()); break;
2157         default: ShouldNotReachHere();
2158       }
2159     } else
2160 #endif
2161       switch (code) {
2162         case lir_shl:  __ slw(dest->as_register(), left->as_register(), tmp->as_register()); break;
2163         case lir_shr:  __ sraw(dest->as_register(), left->as_register(), tmp->as_register()); break;
2164         case lir_ushr: __ srw(dest->as_register(), left->as_register(), tmp->as_register()); break;
2165         default: ShouldNotReachHere();
2166       }
2167   } else {
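         // A long shift uses only the low 6 bits of the count.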
2168     __ rldicl(tmp->as_register(), count->as_register(), 0, 64-6);
2169     switch (code) {
2170       case lir_shl:  __ sld(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
2171       case lir_shr:  __ srad(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
2172       case lir_ushr: __ srd(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
2173       default: ShouldNotReachHere();
2174     }
2175   }
2176 }
2177 
2178 
2179 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2180 #ifdef _LP64
2181   if (left->type() == T_OBJECT) {
2182     count = count & 63;  // Shouldn't shift by more than the bit width of intptr_t.
2183     if (count == 0) { __ mr_if_needed(dest->as_register_lo(), left->as_register()); }
2184     else {
2185       switch (code) {
2186         case lir_shl:  __ sldi(dest->as_register_lo(), left->as_register(), count); break;
2187         case lir_shr:  __ sradi(dest->as_register_lo(), left->as_register(), count); break;
2188         case lir_ushr: __ srdi(dest->as_register_lo(), left->as_register(), count); break;
2189         default: ShouldNotReachHere();
2190       }
2191     }
2192     return;
2193   }
2194 #endif
2195 
2196   if (dest->is_single_cpu()) {
2197     count = count & 0x1F; // Java spec
2198     if (count == 0) { __ mr_if_needed(dest->as_register(), left->as_register()); }
2199     else {
2200       switch (code) {
2201         case lir_shl: __ slwi(dest->as_register(), left->as_register(), count); break;
2202         case lir_shr:  __ srawi(dest->as_register(), left->as_register(), count); break;
2203         case lir_ushr: __ srwi(dest->as_register(), left->as_register(), count); break;
2204         default: ShouldNotReachHere();
2205       }
2206     }
2207   } else if (dest->is_double_cpu()) {
2208     count = count & 63; // Java spec
2209     if (count == 0) { __ mr_if_needed(dest->as_pointer_register(), left->as_pointer_register()); }
2210     else {
2211       switch (code) {
2212         case lir_shl:  __ sldi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
2213         case lir_shr:  __ sradi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
2214         case lir_ushr: __ srdi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
2215         default: ShouldNotReachHere();
2216       }
2217     }
2218   } else {
2219     ShouldNotReachHere();
2220   }
2221 }
2222 
2223 
2224 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2225   if (op->init_check()) {
2226     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2227       explicit_null_check(op->klass()->as_register(), op->stub()->info());
2228     } else {
2229       add_debug_info_for_null_check_here(op->stub()->info());
2230     }
2231     __ lbz(op->tmp1()->as_register(),
2232            in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
2233     // The acquire barrier is included in the membar_storestore() which immediately follows the allocation.
2234     __ cmpwi(CR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
2235     __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), *op->stub()->entry());
2236   }
2237   __ allocate_object(op->obj()->as_register(),
2238                      op->tmp1()->as_register(),
2239                      op->tmp2()->as_register(),
2240                      op->tmp3()->as_register(),
2241                      op->header_size(),
2242                      op->object_size(),
2243                      op->klass()->as_register(),
2244                      *op->stub()->entry());
2245 
2246   __ bind(*op->stub()->continuation());
2247   __ verify_oop(op->obj()->as_register(), FILE_AND_LINE);
2248 }
2249 
2250 
2251 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2252   LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
2253   if (UseSlowPath ||
2254       (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
2255       (!UseFastNewTypeArray   && (!is_reference_type(op->type())))) {
2256     __ b(*op->stub()->entry());
2257   } else {
2258     __ allocate_array(op->obj()->as_register(),
2259                       op->len()->as_register(),
2260                       op->tmp1()->as_register(),
2261                       op->tmp2()->as_register(),
2262                       op->tmp3()->as_register(),
2263                       arrayOopDesc::base_offset_in_bytes(op->type()),
2264                       type2aelembytes(op->type()),
2265                       op->klass()->as_register(),
2266                       *op->stub()->entry(),
2267                       op->zero_array());
2268   }
2269   __ bind(*op->stub()->continuation());
2270 }
2271 
2272 
2273 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
2274                                         ciMethodData *md, ciProfileData *data,
2275                                         Register recv, Register tmp1, Label* update_done) {
2276   uint i;
2277   for (i = 0; i < VirtualCallData::row_limit(); i++) {
2278     Label next_test;
2279     // See if the receiver is receiver[n].
2280     __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
2281     __ verify_klass_ptr(tmp1);
2282     __ cmpd(CR0, recv, tmp1);
2283     __ bne(CR0, next_test);
2284 
2285     __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2286     __ addi(tmp1, tmp1, DataLayout::counter_increment);
2287     __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2288     __ b(*update_done);
2289 
2290     __ bind(next_test);
2291   }
2292 
2293   // Didn't find receiver; find next empty slot and fill it in.
2294   for (i = 0; i < VirtualCallData::row_limit(); i++) {
2295     Label next_test;
2296     __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
2297     __ cmpdi(CR0, tmp1, 0);
2298     __ bne(CR0, next_test);
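         // Found an empty row: install the receiver klass and start its counter at one increment.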
2299     __ li(tmp1, DataLayout::counter_increment);
2300     __ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
2301     __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2302     __ b(*update_done);
2303 
2304     __ bind(next_test);
2305   }
2306 }
2307 
2308 
2309 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
2310                                     ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
2311   md = method->method_data_or_null();
2312   assert(md != nullptr, "Sanity");
2313   data = md->bci_to_data(bci);
2314   assert(data != nullptr,       "need data for checkcast");
2315   assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2316   if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2317     // The offset is large so bias the mdo by the base of the slot so
2318     // that the ld can use simm16s to reference the slots of the data.
2319     mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2320   }
2321 }
2322 
2323 
2324 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2325   const Register obj = op->object()->as_register(); // Needs to live in this register at safepoint (patching stub).
2326   Register k_RInfo = op->tmp1()->as_register();
2327   Register klass_RInfo = op->tmp2()->as_register();
2328   Register Rtmp1 = op->tmp3()->as_register();
2329   Register dst = op->result_opr()->as_register();
2330   ciKlass* k = op->klass();
2331   bool should_profile = op->should_profile();
2332   // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be the same as one of the temps.
2333   bool reg_conflict = false;
2334   if (obj == k_RInfo) {
2335     k_RInfo = dst;
2336     reg_conflict = true;
2337   } else if (obj == klass_RInfo) {
2338     klass_RInfo = dst;
2339     reg_conflict = true;
2340   } else if (obj == Rtmp1) {
2341     Rtmp1 = dst;
2342     reg_conflict = true;
2343   }
2344   assert_different_registers(obj, k_RInfo, klass_RInfo, Rtmp1);
2345 
2346   ciMethodData* md = nullptr;
2347   ciProfileData* data = nullptr;
2348   int mdo_offset_bias = 0;
2349   if (should_profile) {
2350     ciMethod* method = op->profiled_method();
2351     assert(method != nullptr, "Should have method");
2352     setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2353 
2354     Register mdo      = k_RInfo;
2355     Register data_val = Rtmp1;
2356     Label not_null;
2357     metadata2reg(md->constant_encoding(), mdo);
2358     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2359     __ cmpdi(CR0, obj, 0);
2360     __ bne(CR0, not_null);
2361     __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2362     __ ori(data_val, data_val, BitData::null_seen_byte_constant());
2363     __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2364     __ b(*obj_is_null);
2365     __ bind(not_null);
2366 
2367     Label update_done;
2368     Register recv = klass_RInfo;
2369     __ load_klass(recv, obj);
2370     type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, &update_done);
2371     const int slot_offset = md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias;
2372     __ ld(Rtmp1, slot_offset, mdo);
2373     __ addi(Rtmp1, Rtmp1, DataLayout::counter_increment);
2374     __ std(Rtmp1, slot_offset, mdo);
2375     __ bind(update_done);
2376   } else {
2377     __ cmpdi(CR0, obj, 0);
2378     __ beq(CR0, *obj_is_null);
2379   }
2380 
2381   // get object class
2382   __ load_klass(klass_RInfo, obj);
2383 
2384   if (k->is_loaded()) {
2385     metadata2reg(k->constant_encoding(), k_RInfo);
2386   } else {
2387     klass2reg_with_patching(k_RInfo, op->info_for_patch());
2388   }
2389 
2390   if (op->fast_check()) {
2391     assert_different_registers(klass_RInfo, k_RInfo);
2392     __ cmpd(CR0, k_RInfo, klass_RInfo);
2393     __ beq(CR0, *success);
2394     // Fall through to failure case.
2395   } else {
2396     bool need_slow_path = true;
2397     if (k->is_loaded()) {
2398       if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
2399         need_slow_path = false;
2400       }
2401       // Perform the fast part of the checking logic.
2402       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success : nullptr),
2403                                        failure, nullptr, RegisterOrConstant(k->super_check_offset()));
2404     } else {
2405       // Perform the fast part of the checking logic.
2406       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success, failure);
2407     }
2408     if (!need_slow_path) {
2409       __ b(*success);
2410     } else {
2411       // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
2412       address entry = Runtime1::entry_for(StubId::c1_slow_subtype_check_id);
2413       // Stub needs fixed registers (tmp1-3).
2414       Register original_k_RInfo = op->tmp1()->as_register();
2415       Register original_klass_RInfo = op->tmp2()->as_register();
2416       Register original_Rtmp1 = op->tmp3()->as_register();
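           // If obj aliases one of the stub's fixed temp registers (reg_conflict) and checkcast
           // still needs it afterwards, preserve obj across the stub call (via dst, staged
           // through R0 when needed) and restore it after the call.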
2417       bool keep_obj_alive = reg_conflict && (op->code() == lir_checkcast);
2418       if (keep_obj_alive && (obj != original_Rtmp1)) { __ mr(R0, obj); }
2419       __ mr_if_needed(original_k_RInfo, k_RInfo);
2420       __ mr_if_needed(original_klass_RInfo, klass_RInfo);
2421       if (keep_obj_alive) { __ mr(dst, (obj == original_Rtmp1) ? obj : R0); }
2422       //__ load_const_optimized(original_Rtmp1, entry, R0);
2423       __ calculate_address_from_global_toc(original_Rtmp1, entry, true, true, false);
2424       __ mtctr(original_Rtmp1);
2425       __ bctrl(); // sets CR0
2426       if (keep_obj_alive) { __ mr(obj, dst); }
2427       __ beq(CR0, *success);
2428       // Fall through to failure case.
2429     }
2430   }
2431 
2432   __ bind(*failure);
2433 }
2434 
2435 
2436 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2437   LIR_Code code = op->code();
2438   if (code == lir_store_check) {
2439     Register value = op->object()->as_register();
2440     Register array = op->array()->as_register();
2441     Register k_RInfo = op->tmp1()->as_register();
2442     Register klass_RInfo = op->tmp2()->as_register();
2443     Register Rtmp1 = op->tmp3()->as_register();
2444     bool should_profile = op->should_profile();
2445 
2446     __ verify_oop(value, FILE_AND_LINE);
2447     CodeStub* stub = op->stub();
2448     // Check if it needs to be profiled.
2449     ciMethodData* md = nullptr;
2450     ciProfileData* data = nullptr;
2451     int mdo_offset_bias = 0;
2452     if (should_profile) {
2453       ciMethod* method = op->profiled_method();
2454       assert(method != nullptr, "Should have method");
2455       setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2456     }
2457 
2458     Label done;
2459 
2460     if (should_profile) {
2461       Label not_null;
2462       Register mdo      = k_RInfo;
2463       Register data_val = Rtmp1;
2464       metadata2reg(md->constant_encoding(), mdo);
2465       __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2466       __ cmpdi(CR0, value, 0);
2467       __ bne(CR0, not_null);
2468       __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2469       __ ori(data_val, data_val, BitData::null_seen_byte_constant());
2470       __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2471       __ b(done);
2472       __ bind(not_null);
2473 
2474       Label update_done;
2475       Register recv = klass_RInfo;
2476       __ load_klass(recv, value);
2477       type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, &update_done);
2478       const int slot_offset = md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias;
2479       __ ld(Rtmp1, slot_offset, mdo);
2480       __ addi(Rtmp1, Rtmp1, DataLayout::counter_increment);
2481       __ std(Rtmp1, slot_offset, mdo);
2482       __ bind(update_done);
2483     } else {
2484       __ cmpdi(CR0, value, 0);
2485       __ beq(CR0, done);
2486     }
2487     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2488       explicit_null_check(array, op->info_for_exception());
2489     } else {
2490       add_debug_info_for_null_check_here(op->info_for_exception());
2491     }
2492     __ load_klass(k_RInfo, array);
2493     __ load_klass(klass_RInfo, value);
2494 
2495     Label failure;
2496 
2497     // Get instance klass.
2498     __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo);
2499     // Perform the fast part of the checking logic.
2500     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, &done, &failure, nullptr);
2501 
2502     // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
2503     const address slow_path = Runtime1::entry_for(StubId::c1_slow_subtype_check_id);
2504     //__ load_const_optimized(R0, slow_path);
2505     __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
2506     __ mtctr(R0);
2507     __ bctrl(); // sets CR0
2508     __ beq(CR0, done);
2509 
2510     __ bind(failure);
2511     __ b(*stub->entry());
2512     __ align(32, 12);
2513     __ bind(done);
2514 
2515   } else if (code == lir_checkcast) {
2516     Label success, failure;
2517     emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success);
2518     __ b(*op->stub()->entry());
2519     __ align(32, 12);
2520     __ bind(success);
2521     __ mr_if_needed(op->result_opr()->as_register(), op->object()->as_register());
2522   } else if (code == lir_instanceof) {
2523     Register dst = op->result_opr()->as_register();
2524     Label success, failure, done;
2525     emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure);
2526     __ li(dst, 0);
2527     __ b(done);
2528     __ align(32, 12);
2529     __ bind(success);
2530     __ li(dst, 1);
2531     __ bind(done);
2532   } else {
2533     ShouldNotReachHere();
2534   }
2535 }
2536 
2537 
2538 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2539   Register addr = op->addr()->as_pointer_register();
2540   Register cmp_value = noreg, new_value = noreg;
2541   bool is_64bit = false;
2542 
2543   if (op->code() == lir_cas_long) {
2544     cmp_value = op->cmp_value()->as_register_lo();
2545     new_value = op->new_value()->as_register_lo();
2546     is_64bit = true;
2547   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2548     cmp_value = op->cmp_value()->as_register();
2549     new_value = op->new_value()->as_register();
2550     if (op->code() == lir_cas_obj) {
2551       if (UseCompressedOops) {
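             // With compressed oops the CAS operates on narrow (32-bit) oops, so encode both
             // the compare value and the new value first.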
2552         Register t1 = op->tmp1()->as_register();
2553         Register t2 = op->tmp2()->as_register();
2554         cmp_value = __ encode_heap_oop(t1, cmp_value);
2555         new_value = __ encode_heap_oop(t2, new_value);
2556       } else {
2557         is_64bit = true;
2558       }
2559     }
2560   } else {
2561     Unimplemented();
2562   }
2563 
2564   // There might be a volatile load before this Unsafe CAS.
2565   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2566     __ sync();
2567   } else {
2568     __ lwsync();
2569   }
2570 
2571   if (is_64bit) {
2572     __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
2573                 MacroAssembler::MemBarNone,
2574                 MacroAssembler::cmpxchgx_hint_atomic_update(),
2575                 noreg, nullptr, /*check without ldarx first*/true);
2576   } else {
2577     __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
2578                 MacroAssembler::MemBarNone,
2579                 MacroAssembler::cmpxchgx_hint_atomic_update(),
2580                 noreg, nullptr, /*check without ldarx first*/true);
2581   }
2582 
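       // Trailing barrier to give the CAS acquire semantics.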
2583   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2584     __ isync();
2585   } else {
2586     __ sync();
2587   }
2588 }
2589 
2590 void LIR_Assembler::breakpoint() {
2591   __ illtrap();
2592 }
2593 
2594 
2595 void LIR_Assembler::push(LIR_Opr opr) {
2596   Unimplemented();
2597 }
2598 
2599 void LIR_Assembler::pop(LIR_Opr opr) {
2600   Unimplemented();
2601 }
2602 
2603 
2604 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2605   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2606   Register dst = dst_opr->as_register();
2607   Register reg = mon_addr.base();
2608   int offset = mon_addr.disp();
2609   // Compute pointer to BasicLock.
2610   __ add_const_optimized(dst, reg, offset);
2611 }
2612 
2613 
2614 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2615   Register obj = op->obj_opr()->as_register();
2616   Register hdr = op->hdr_opr()->as_register();
2617   Register lock = op->lock_opr()->as_register();
2618 
2619   // Obj may not be an oop.
2620   if (op->code() == lir_lock) {
2621     MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2622     // Add debug info for NullPointerException only if one is possible.
2623     if (op->info() != nullptr) {
2624       if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2625         explicit_null_check(obj, op->info());
2626       } else {
2627         add_debug_info_for_null_check_here(op->info());
2628       }
2629     }
2630     __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2631   } else {
2632     assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2633     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2634   }
2635   __ bind(*op->stub()->continuation());
2636 }
2637 
2638 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2639   Register obj = op->obj()->as_pointer_register();
2640   Register result = op->result_opr()->as_pointer_register();
2641 
2642   CodeEmitInfo* info = op->info();
2643   if (info != nullptr) {
2644     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2645       explicit_null_check(obj, info);
2646     } else {
2647       add_debug_info_for_null_check_here(info);
2648     }
2649   }
2650 
2651   __ load_klass(result, obj);
2652 }
2653 
2654 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2655   ciMethod* method = op->profiled_method();
2656   int bci          = op->profiled_bci();
2657   ciMethod* callee = op->profiled_callee();
2658 
2659   // Update counter for all call types.
2660   ciMethodData* md = method->method_data_or_null();
2661   assert(md != nullptr, "Sanity");
2662   ciProfileData* data = md->bci_to_data(bci);
2663   assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
2664   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2665   Register mdo = op->mdo()->as_register();
2666 #ifdef _LP64
2667   assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
2668   Register tmp1 = op->tmp1()->as_register_lo();
2669 #else
2670   assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
2671   Register tmp1 = op->tmp1()->as_register();
2672 #endif
2673   metadata2reg(md->constant_encoding(), mdo);
2674   int mdo_offset_bias = 0;
2675   if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) +
2676                             data->size_in_bytes())) {
2677     // The offset is large so bias the mdo by the base of the slot so
2678     // that the ld can use simm16s to reference the slots of the data.
2679     mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
2680     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2681   }
2682 
2683   // Perform additional virtual call profiling for invokevirtual and
2684   // invokeinterface bytecodes
2685   if (op->should_profile_receiver_type()) {
2686     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2687     Register recv = op->recv()->as_register();
2688     assert_different_registers(mdo, tmp1, recv);
2689     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2690     ciKlass* known_klass = op->known_holder();
2691     if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
2692       // We know the type that will be seen at this call site; we can
2693       // statically update the MethodData* rather than needing to do
2694       // dynamic tests on the receiver type.
2695 
2696       // NOTE: we should probably put a lock around this search to
2697       // avoid collisions by concurrent compilations.
2698       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2699       uint i;
2700       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2701         ciKlass* receiver = vc_data->receiver(i);
2702         if (known_klass->equals(receiver)) {
2703           __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2704           __ addi(tmp1, tmp1, DataLayout::counter_increment);
2705           __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2706           return;
2707         }
2708       }
2709 
2710       // Receiver type not found in profile data; select an empty slot.
2711 
2712       // Note that this is less efficient than it should be because it
2713       // always does a write to the receiver part of the
2714       // VirtualCallData rather than just the first time.
2715       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2716         ciKlass* receiver = vc_data->receiver(i);
2717         if (receiver == nullptr) {
2718           metadata2reg(known_klass->constant_encoding(), tmp1);
2719           __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo);
2720 
2721           __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2722           __ addi(tmp1, tmp1, DataLayout::counter_increment);
2723           __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2724           return;
2725         }
2726       }
2727     } else {
2728       __ load_klass(recv, recv);
2729       Label update_done;
2730       type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
2731       // Receiver did not match any saved receiver and there is no empty row for it.
2732       // Increment total counter to indicate polymorphic case.
2733       __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2734       __ addi(tmp1, tmp1, DataLayout::counter_increment);
2735       __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2736 
2737       __ bind(update_done);
2738     }
2739   } else {
2740     // Static call
2741     __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2742     __ addi(tmp1, tmp1, DataLayout::counter_increment);
2743     __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2744   }
2745 }
2746 
2747 
2748 void LIR_Assembler::align_backward_branch_target() {
2749   __ align(32, 12); // Insert up to 3 nops to align with 32 byte boundary.
2750 }
2751 
2752 
2753 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2754   // tmp must be unused
2755   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2756   assert(left->is_register(), "can only handle registers");
2757 
2758   if (left->is_single_cpu()) {
2759     __ neg(dest->as_register(), left->as_register());
2760   } else if (left->is_single_fpu()) {
2761     __ fneg(dest->as_float_reg(), left->as_float_reg());
2762   } else if (left->is_double_fpu()) {
2763     __ fneg(dest->as_double_reg(), left->as_double_reg());
2764   } else {
2765     assert (left->is_double_cpu(), "Must be a long");
2766     __ neg(dest->as_register_lo(), left->as_register_lo());
2767   }
2768 }
2769 
2770 
2771 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
2772                             const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2773   // Stubs: Called via rt_call, but dest is a stub address (no FunctionDescriptor).
2774   if (dest == Runtime1::entry_for(StubId::c1_register_finalizer_id) ||
2775       dest == Runtime1::entry_for(StubId::c1_new_multi_array_id   ) ||
2776       dest == Runtime1::entry_for(StubId::c1_is_instance_of_id    )) {
2777     assert(CodeCache::contains(dest), "simplified call is only for special C1 stubs");
2778     //__ load_const_optimized(R0, dest);
2779     __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
2780     __ mtctr(R0);
2781     __ bctrl();
2782     if (info != nullptr) {
2783       add_call_info_here(info);
2784       __ post_call_nop();
2785     }
2786     return;
2787   }
2788 
2789   __ call_c(dest, relocInfo::runtime_call_type);
2790   assert(__ last_calls_return_pc() == __ pc(), "pc not at return pc");
2791   if (info != nullptr) {
2792     add_call_info_here(info);
2793     __ post_call_nop();
2794   }
2795 }
2796 
2797 
2798 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2799   ShouldNotReachHere(); // Not needed on _LP64.
2800 }
2801 
2802 void LIR_Assembler::membar() {
2803   __ fence();
2804 }
2805 
2806 void LIR_Assembler::membar_acquire() {
2807   __ acquire();
2808 }
2809 
2810 void LIR_Assembler::membar_release() {
2811   __ release();
2812 }
2813 
2814 void LIR_Assembler::membar_loadload() {
2815   __ membar(Assembler::LoadLoad);
2816 }
2817 
2818 void LIR_Assembler::membar_storestore() {
2819   __ membar(Assembler::StoreStore);
2820 }
2821 
2822 void LIR_Assembler::membar_loadstore() {
2823   __ membar(Assembler::LoadStore);
2824 }
2825 
2826 void LIR_Assembler::membar_storeload() {
2827   __ membar(Assembler::StoreLoad);
2828 }
2829 
2830 void LIR_Assembler::on_spin_wait() {
2831   Unimplemented();
2832 }
2833 
2834 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2835   LIR_Address* addr = addr_opr->as_address_ptr();
2836   assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform");
2837 
2838   if (addr->index()->is_illegal()) {
2839     if (patch_code != lir_patch_none) {
2840       PatchingStub* patch = new PatchingStub(_masm, PatchingStub::access_field_id);
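           // The displacement is not yet known (unresolved field); emit a
           // patchable 32-bit immediate that is fixed up once the field offset
           // has been resolved.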
2841       __ load_const32(R0, 0); // patchable int
2842       __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), R0);
2843       patching_epilog(patch, patch_code, addr->base()->as_register(), info);
2844     } else {
2845       __ add_const_optimized(dest->as_pointer_register(), addr->base()->as_pointer_register(), addr->disp());
2846     }
2847   } else {
2848     assert(patch_code == lir_patch_none, "Patch code not supported");
2849     assert(addr->disp() == 0, "can't have both: index and disp");
2850     __ add(dest->as_pointer_register(), addr->index()->as_pointer_register(), addr->base()->as_pointer_register());
2851   }
2852 }
2853 
2854 
2855 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2856   ShouldNotReachHere();
2857 }
2858 
2859 
2860 #ifdef ASSERT
2861 // Emit run-time assertion.
2862 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2863   Unimplemented();
2864 }
2865 #endif
2866 
2867 
2868 void LIR_Assembler::peephole(LIR_List* lir) {
2869   // Optimize instruction pairs before emitting.
2870   LIR_OpList* inst = lir->instructions_list();
2871   for (int i = 1; i < inst->length(); i++) {
2872     LIR_Op* op = inst->at(i);
2873 
2874     // Two consecutive register-register moves "A->B; B->A": after the first
         // move both registers hold the same value, so the second (back-)move is
         // redundant and can be removed.
2875     if (op->code() == lir_move) {
2876       LIR_Opr in2  = ((LIR_Op1*)op)->in_opr(),
2877               res2 = ((LIR_Op1*)op)->result_opr();
2878       if (in2->is_register() && res2->is_register()) {
2879         LIR_Op* prev = inst->at(i - 1);
2880         if (prev && prev->code() == lir_move) {
2881           LIR_Opr in1  = ((LIR_Op1*)prev)->in_opr(),
2882                   res1 = ((LIR_Op1*)prev)->result_opr();
2883           if (in1->is_same_register(res2) && in2->is_same_register(res1)) {
2884             inst->remove_at(i);
2885           }
2886         }
2887       }
2888     }
2889 
2890   }
2891   return;
2892 }
2893 
2894 
2895 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
2896   const LIR_Address *addr = src->as_address_ptr();
2897   assert(addr->disp() == 0 && addr->index()->is_illegal(), "use leal!");
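       // Complex addresses (nonzero displacement or index) must have been
       // flattened into a plain base register by a preceding leal.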
2898   const Register Rptr = addr->base()->as_pointer_register(),
2899                  Rtmp = tmp->as_register();
2900   Register Robj = noreg;
2901   if (data->is_oop()) {
2902     if (UseCompressedOops) {
2903       Robj = __ encode_heap_oop(Rtmp, data->as_register());
2904     } else {
2905       Robj = data->as_register();
2906       if (Robj == dest->as_register()) { // May happen with ZGC.
2907         __ mr(Rtmp, Robj);
2908         Robj = Rtmp;
2909       }
2910     }
2911   }
2912 
2913   // There might be a volatile load before this Unsafe op: on CPUs that need
       // IRIW support (not multiple-copy-atomic) a full sync is required here;
       // otherwise lwsync suffices as the leading barrier.
2914   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2915     __ sync();
2916   } else {
2917     __ lwsync();
2918   }
2919 
2920   Label Lretry;
2921   __ bind(Lretry);
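       // LL/SC retry loop. For T_INT this emits, roughly:
       //   Lretry: lwarx  Rold, Rptr         // load-reserve current value
       //           add    Rtmp, Rsrc, Rold   // lir_xadd only
       //           stwcx. Rtmp/Rsrc, Rptr    // store-conditional
       //           bne    CR0, Lretry        // retry if the reservation was lost
       // T_LONG uses ldarx/stdcx.; oops use the word or doubleword variant
       // depending on UseCompressedOops.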
2922 
2923   if (data->type() == T_INT) {
2924     const Register Rold = dest->as_register(),
2925                    Rsrc = data->as_register();
2926     assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
2927     __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
2928     if (code == lir_xadd) {
2929       __ add(Rtmp, Rsrc, Rold);
2930       __ stwcx_(Rtmp, Rptr);
2931     } else {
2932       __ stwcx_(Rsrc, Rptr);
2933     }
2934   } else if (data->is_oop()) {
2935     assert(code == lir_xchg, "xadd not supported for oops");
2936     const Register Rold = dest->as_register();
2937     assert_different_registers(Rptr, Rold, Robj);
2938     if (UseCompressedOops) {
2939       __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
2940       __ stwcx_(Robj, Rptr);
2941     } else {
2942       __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
2943       __ stdcx_(Robj, Rptr);
2944     }
2945   } else if (data->type() == T_LONG) {
2946     const Register Rold = dest->as_register_lo(),
2947                    Rsrc = data->as_register_lo();
2948     assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
2949     __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
2950     if (code == lir_xadd) {
2951       __ add(Rtmp, Rsrc, Rold);
2952       __ stdcx_(Rtmp, Rptr);
2953     } else {
2954       __ stdcx_(Rsrc, Rptr);
2955     }
2956   } else {
2957     ShouldNotReachHere();
2958   }
2959 
2960   if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
2961     __ bne_predict_not_taken(CR0, Lretry);
2962   } else {
2963     __ bne(                  CR0, Lretry);
2964   }
2965 
2966   if (UseCompressedOops && data->is_oop()) {
2967     __ decode_heap_oop(dest->as_register());
2968   }
2969 
2970   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2971     __ isync();
2972   } else {
2973     __ sync();
2974   }
2975 }
2976 
2977 
2978 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2979   Register obj = op->obj()->as_register();
2980   Register tmp = op->tmp()->as_pointer_register();
2981   LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
2982   ciKlass* exact_klass = op->exact_klass();
2983   intptr_t current_klass = op->current_klass();
2984   bool not_null = op->not_null();
2985   bool no_conflict = op->no_conflict();
2986 
2987   Label Lupdate, Ldo_update, Ldone;
2988 
2989   bool do_null = !not_null;
2990   bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2991   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2992 
2993   assert(do_null || do_update, "why are we here?");
2994   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
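       // The profile cell holds a klass pointer with the TypeEntries flag bits
       // (null_seen, type_unknown) encoded in its low bits, so the updates below
       // load the cell, adjust the bits, and store it back.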
2995 
2996   __ verify_oop(obj, FILE_AND_LINE);
2997 
2998   if (do_null) {
2999     if (!TypeEntries::was_null_seen(current_klass)) {
3000       __ cmpdi(CR0, obj, 0);
3001       __ bne(CR0, Lupdate);
3002       __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3003       __ ori(R0, R0, TypeEntries::null_seen);
3004       if (do_update) {
3005         __ b(Ldo_update);
3006       } else {
3007         __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3008       }
3009     } else {
3010       if (do_update) {
3011         __ cmpdi(CR0, obj, 0);
3012         __ beq(CR0, Ldone);
3013       }
3014     }
3015 #ifdef ASSERT
3016   } else {
3017     __ cmpdi(CR0, obj, 0);
3018     __ bne(CR0, Lupdate);
3019     __ stop("unexpected null obj");
3020 #endif
3021   }
3022 
3023   __ bind(Lupdate);
3024   if (do_update) {
3025     Label Lnext;
3026     const Register klass = R29_TOC; // kill and reload
3027     bool klass_reg_used = false;
3028 #ifdef ASSERT
3029     if (exact_klass != nullptr) {
3030       Label ok;
3031       klass_reg_used = true;
3032       __ load_klass(klass, obj);
3033       metadata2reg(exact_klass->constant_encoding(), R0);
3034       __ cmpd(CR0, klass, R0);
3035       __ beq(CR0, ok);
3036       __ stop("exact klass and actual klass differ");
3037       __ bind(ok);
3038     }
3039 #endif
3040 
3041     if (!no_conflict) {
3042       if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
3043         klass_reg_used = true;
3044         if (exact_klass != nullptr) {
3045           __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3046           metadata2reg(exact_klass->constant_encoding(), klass);
3047         } else {
3048           __ load_klass(klass, obj);
3049           __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); // may kill obj
3050         }
3051 
3052         // Like InterpreterMacroAssembler::profile_obj_type
3053         __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
3054         // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
3055         __ cmpd(CR1, R0, klass);
3056         // Klass seen before, nothing to do (regardless of unknown bit).
3057         //beq(CR1, do_nothing);
3058 
3059         __ andi_(R0, tmp, TypeEntries::type_unknown);
3060         // Already unknown. Nothing to do anymore.
3061         //bne(CR0, do_nothing);
3062         __ crorc(CR0, Assembler::equal, CR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
3063         __ beq(CR0, Lnext);
3064 
3065         if (TypeEntries::is_type_none(current_klass)) {
3066           __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
3067           __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
3068           __ beq(CR0, Ldo_update); // First time here. Set profile type.
3069         }
3070 
3071       } else {
3072         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
3073                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
3074 
3075         __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3076         __ andi_(R0, tmp, TypeEntries::type_unknown);
3077         // Already unknown. Nothing to do anymore.
3078         __ bne(CR0, Lnext);
3079       }
3080 
3081       // Different than before. Cannot keep accurate profile.
3082       __ ori(R0, tmp, TypeEntries::type_unknown);
3083     } else {
3084       // There's a single possible klass at this profile point
3085       assert(exact_klass != nullptr, "should be");
3086       __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3087 
3088       if (TypeEntries::is_type_none(current_klass)) {
3089         klass_reg_used = true;
3090         metadata2reg(exact_klass->constant_encoding(), klass);
3091 
3092         __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
3093         // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
3094         __ cmpd(CR1, R0, klass);
3095         // Klass seen before, nothing to do (regardless of unknown bit).
3096         __ beq(CR1, Lnext);
3097 #ifdef ASSERT
3098         {
3099           Label ok;
3100           __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
3101           __ beq(CR0, ok); // First time here.
3102 
3103           __ stop("unexpected profiling mismatch");
3104           __ bind(ok);
3105         }
3106 #endif
3107         // First time here. Set profile type.
3108         __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
3109       } else {
3110         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
3111                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3112 
3113         // Already unknown. Nothing to do anymore.
3114         __ andi_(R0, tmp, TypeEntries::type_unknown);
3115         __ bne(CR0, Lnext);
3116 
3117         // Different than before. Cannot keep accurate profile.
3118         __ ori(R0, tmp, TypeEntries::type_unknown);
3119       }
3120     }
3121 
3122     __ bind(Ldo_update);
3123     __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3124 
3125     __ bind(Lnext);
3126     if (klass_reg_used) { __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); } // reinit
3127   }
3128   __ bind(Ldone);
3129 }
3130 
3131 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
3132   Unimplemented();
3133 }
3134 
3135 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
3136   assert(op->crc()->is_single_cpu(), "crc must be register");
3137   assert(op->val()->is_single_cpu(), "byte value must be register");
3138   assert(op->result_opr()->is_single_cpu(), "result must be register");
3139   Register crc = op->crc()->as_register();
3140   Register val = op->val()->as_register();
3141   Register res = op->result_opr()->as_register();
3142 
3143   assert_different_registers(val, crc, res);
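       // res temporarily holds the CRC table address; the updated CRC is computed
       // in crc and then copied into the result register.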
3144 
3145   __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
3146   __ kernel_crc32_singleByteReg(crc, val, res, true);
3147   __ mr(res, crc);
3148 }
3149 
3150 #undef __