/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"

#define __ _masm->


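// comp_op() below leaves the outcome of LIR compare operations in
// BOOL_RESULT; emit_opBranch() reads it from there.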
const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  Unimplemented(); return false; // Currently not used on this platform.
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::R3_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::R3_opr;
}


// This specifies the stack pointer decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in inline_cache_reg;
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
  return offset;
}

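// Class initialization entry barrier: until the holder class is fully
// initialized, threads other than the initializing one are re-routed to
// SharedRuntime's handle_wrong_method stub; clinit_barrier branches to
// L_skip_barrier on the fast path once initialization is done.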
void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = R20;

  metadata2reg(method->holder()->constant_encoding(), klass);
  __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);

  __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
  __ mtctr(klass);
  __ bctr();

  __ bind(L_skip_barrier);
}

void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation. The expression
  //      stack must be empty at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock from the interpreter frame (the method lock if a synchronized method).
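  //
  // A worked example with hypothetical values (BytesPerWord == 8): for
  // max_locals == 3 and number_of_locks == 2, the locals occupy buffer
  // bytes 0..23 and monitor_offset == 8*3 + 16*1 == 40. Iteration i == 0
  // below copies the lock word at buffer offset 40 and the oop at 48;
  // i == 1 copies the lock word at 24 and the oop at 32.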

  // Initialize monitors in the compiled activation.
  //   R3: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // Verify the interpreter's monitor has a non-null object.
      {
        Label L;
        __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
        __ cmpdi(CCR0, R0, 0);
        __ bne(CCR0, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      Address ml = frame_map()->address_for_monitor_lock(i),
              mo = frame_map()->address_for_monitor_object(i);
      assert(ml.index() == noreg && mo.index() == noreg, "sanity");
      __ ld(R0, slot_offset + 0, OSR_buf);
      __ std(R0, ml.disp(), ml.base());
      __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
      __ std(R0, mo.disp(), mo.base());
    }
  }
}


int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();
  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  //__ load_const_optimized(R0, entry_point);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
  __ mtctr(R0);
  __ bctr();

  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
  _masm->block_comment("Unwind handler");

  int offset = code_offset();
  bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
  const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;

  // Fetch the exception from TLS and clear out exception related thread state.
  __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ li(R0, 0);
  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception);
  if (preserve_exception) { __ mr(Rexception_save, Rexception); }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R4_opr);
    stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
    __ unlock_object(R5, R6, R4, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    Unimplemented();
  }

  // Dispatch to the unwind logic.
  address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
  //__ load_const_optimized(R0, unwind_stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
  if (preserve_exception) { __ mr(Rexception, Rexception_save); }
  __ mtctr(R0);
  __ bctr();

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ li(reg, 0);
  } else {
    AddressLiteral addrlit = __ constant_oop_address(o);
    __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((address)NULL, oop_Relocation::spec(oop_index));
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  AddressLiteral md = __ constant_metadata_address(o); // Notify OOP recorder (don't need the relocation)
  __ load_const_optimized(reg, md.value(), (reg != R0) ? R0 : noreg);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  const bool is_int = result->is_single_cpu();
  Register Rdividend = is_int ? left->as_register() : left->as_register_lo();
  Register Rdivisor  = noreg;
  Register Rscratch  = temp->as_register();
  Register Rresult   = is_int ? result->as_register() : result->as_register_lo();
  long divisor = -1;

  if (right->is_register()) {
    Rdivisor = is_int ? right->as_register() : right->as_register_lo();
  } else {
    divisor = is_int ? right->as_constant_ptr()->as_jint()
                     : right->as_constant_ptr()->as_jlong();
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  assert(code == lir_idiv || code == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg) {
    if (divisor == 1) { // stupid, but can happen
      if (code == lir_idiv) {
        __ mr_if_needed(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else if (is_power_of_2(divisor)) {
      // Convert division by a power of two into some shifts and logical operations.
      int log2 = log2i_exact(divisor);

      // Round towards 0.
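      // (The adjustment below makes the arithmetic shift truncate instead
      // of flooring: for a negative dividend we first add divisor - 1.
      // E.g. -7 / 4: srawi alone would yield -2, but -7 + 3 == -4 shifted
      // right by 2 yields the expected -1. For divisor == 2 the logical
      // shift of the sign bit supplies the +1 adjustment directly.)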
      if (divisor == 2) {
        if (is_int) {
          __ srwi(Rscratch, Rdividend, 31);
        } else {
          __ srdi(Rscratch, Rdividend, 63);
        }
      } else {
        if (is_int) {
          __ srawi(Rscratch, Rdividend, 31);
        } else {
          __ sradi(Rscratch, Rdividend, 63);
        }
        __ clrldi(Rscratch, Rscratch, 64-log2);
      }
      __ add(Rscratch, Rdividend, Rscratch);

      if (code == lir_idiv) {
        if (is_int) {
          __ srawi(Rresult, Rscratch, log2);
        } else {
          __ sradi(Rresult, Rscratch, log2);
        }
      } else { // lir_irem
        __ clrrdi(Rscratch, Rscratch, log2);
        __ sub(Rresult, Rdividend, Rscratch);
      }

    } else if (divisor == -1) {
      if (code == lir_idiv) {
        __ neg(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else {
      __ load_const_optimized(Rscratch, divisor);
      if (code == lir_idiv) {
        if (is_int) {
          __ divw(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        } else {
          __ divd(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        }
      } else {
        assert(Rscratch != R0, "need both");
        if (is_int) {
          __ divw(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mullw(Rscratch, R0, Rscratch);
        } else {
          __ divd(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mulld(Rscratch, R0, Rscratch);
        }
        __ sub(Rresult, Rdividend, Rscratch);
      }

    }
    return;
  }

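  // Divisor in a register: divw/divd leave an undefined result for the
  // overflow case min_int/-1 (resp. min_long/-1), so divisor == -1 is
  // handled separately as a negation (lir_idiv) or a zero result (lir_irem).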
  Label regular, done;
  if (is_int) {
    __ cmpwi(CCR0, Rdivisor, -1);
  } else {
    __ cmpdi(CCR0, Rdivisor, -1);
  }
  __ bne(CCR0, regular);
  if (code == lir_idiv) {
    __ neg(Rresult, Rdividend);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    } else {
      __ divd(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    }
  } else { // lir_irem
    __ li(Rresult, 0);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mullw(Rscratch, Rscratch, Rdivisor);
    } else {
      __ divd(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mulld(Rscratch, Rscratch, Rdivisor);
    }
    __ sub(Rresult, Rdividend, Rscratch);
  }
  __ bind(done);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
  case lir_irem:
    arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(),
                    op->result_opr(), op->info());
    break;
  case lir_fmad:
    __ fmadd(op->result_opr()->as_double_reg(), op->in_opr1()->as_double_reg(),
             op->in_opr2()->as_double_reg(), op->in_opr3()->as_double_reg());
    break;
  case lir_fmaf:
    __ fmadds(op->result_opr()->as_float_reg(), op->in_opr1()->as_float_reg(),
              op->in_opr2()->as_float_reg(), op->in_opr3()->as_float_reg());
    break;
  default: ShouldNotReachHere(); break;
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
#endif

  Label *L = op->label();
  if (op->cond() == lir_cond_always) {
    __ b(*L);
  } else {
    Label done;
    bool is_unordered = false;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      is_unordered = true;
    } else {
      assert(op->code() == lir_branch, "just checking");
    }

    bool positive = false;
    Assembler::Condition cond = Assembler::equal;
    switch (op->cond()) {
      case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
      case lir_cond_belowEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
      case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
      case lir_cond_aboveEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
      default:                    ShouldNotReachHere();
    }
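    // bo selects branch-on-bit-set vs. branch-on-bit-clear, bi the CR bit
    // of BOOL_RESULT to test; conditions without a direct CR bit are
    // encoded by testing the complementary bit for 0 (e.g. lir_cond_notEqual
    // branches when the EQ bit is 0).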
    int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
    int bi = Assembler::bi0(BOOL_RESULT, cond);
    if (is_unordered) {
      if (positive) {
        if (op->ublock() == op->block()) {
          __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(BOOL_RESULT, Assembler::summary_overflow), *L);
        }
      } else {
        if (op->ublock() != op->block()) { __ bso(BOOL_RESULT, done); }
      }
    }
    __ bc_far_optimized(bo, bi, *L);
    __ bind(done);
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr src = op->in_opr(),
          dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      __ extsw(dst->as_register_lo(), src->as_register());
      break;
    }
    case Bytecodes::_l2i: {
      __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
      break;
    }
    case Bytecodes::_i2b: {
      __ extsb(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2c: {
      __ clrldi(dst->as_register(), src->as_register(), 64-16);
      break;
    }
    case Bytecodes::_i2s: {
      __ extsh(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_l2d: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_double_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2d) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      __ fcfid(rdst, rsrc);
      break;
    }
    case Bytecodes::_i2f:
    case Bytecodes::_l2f: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_float_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2f) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      if (VM_Version::has_fcfids()) {
        __ fcfids(rdst, rsrc);
      } else {
        assert(code == Bytecodes::_i2f, "fcfid+frsp needs fixup code to avoid rounding incompatibility");
        __ fcfid(rdst, rsrc);
        __ frsp(rdst, rdst);
      }
      break;
    }
    case Bytecodes::_f2d: {
      __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
      break;
    }
    case Bytecodes::_d2f: {
      __ frsp(dst->as_float_reg(), src->as_double_reg());
      break;
    }
    case Bytecodes::_d2i:
    case Bytecodes::_f2i: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
      Address       addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
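      // (fcmpu of a value against itself is unordered iff the value is NaN;
      // the bso below then branches over the conversion, keeping the 0.)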
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NAN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register(), 0);
      }
      __ bso(CCR0, L);
      __ fctiwz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register(), rsrc);
      }
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l:
    case Bytecodes::_f2l: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
      Address       addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NAN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register_lo(), 0);
      }
      __ bso(CCR0, L);
      __ fctidz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register_lo(), rsrc);
      }
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on ppc
}


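// A bl instruction can only reach targets within +/-32 MB of the call
// site, so each far call gets a trampoline stub in the stub section: it
// loads the 64-bit entry point from the constant pool (TOC) and branches
// via CTR. The bl emitted later is patched to hit the target directly
// when in range, or the trampoline otherwise.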
bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
  int start_offset = __ offset();
  // Put the entry point as a constant into the constant pool.
  const address entry_point_toc_addr   = __ address_constant(target, RelocationHolder::none);
  if (entry_point_toc_addr == NULL) {
    bailout("const section overflow");
    return false;
  }
  const int     entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub which will be related to the branch-and-link below.
  address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
  if (!stub) {
    bailout("no space for trampoline stub");
    return false;
  }
  return true;
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(rtype==relocInfo::opt_virtual_call_type || rtype==relocInfo::static_call_type, "unexpected rtype");

  bool success = emit_trampoline_stub_for_call(op->addr());
  if (!success) { return; }

  __ relocate(rtype);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ calculate_address_from_global_toc(R2_TOC, __ method_toc());

  // Virtual call relocation will point to ic load.
  address virtual_call_meta_addr = __ pc();
  // Load a clear inline cache.
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
  if (!success) {
    bailout("const section overflow");
    return;
  }
  // Call to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));

  success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
  if (!success) { return; }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}

void LIR_Assembler::explicit_null_check(Register addr, CodeEmitInfo* info) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(code_offset(), info);
  __ null_check(addr, stub->entry());
  append_code_stub(stub);
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we setup the offset.
    assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
    __ load_const_optimized(R0, offset);
    store_offset = store(from_reg, base, R0, type, wide);
  } else {
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
      case T_CHAR  :
      case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
      case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
      case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
      case T_ADDRESS:
      case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            // Encoding done in caller
            __ stw(from_reg->as_register(), offset, base);
            __ verify_coop(from_reg->as_register(), FILE_AND_LINE);
          } else {
            __ std(from_reg->as_register(), offset, base);
            __ verify_oop(from_reg->as_register(), FILE_AND_LINE);
          }
          break;
        }
      case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
    case T_CHAR  :
    case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stdx(from_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    case T_ADDRESS:
      __ stdx(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          // Encoding done in caller.
          __ stwx(from_reg->as_register(), base, disp);
          __ verify_coop(from_reg->as_register(), FILE_AND_LINE); // kills R0
        } else {
          __ stdx(from_reg->as_register(), base, disp);
          __ verify_oop(from_reg->as_register(), FILE_AND_LINE); // kills R0
        }
        break;
      }
    case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we setup the offset.
    __ load_const_optimized(R0, offset);
    load_offset = load(base, R0, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  :   __ lbz(to_reg->as_register(), offset, base);
                       __ extsb(to_reg->as_register(), to_reg->as_register()); break;
      case T_CHAR  :   __ lhz(to_reg->as_register(), offset, base); break;
      case T_SHORT :   __ lha(to_reg->as_register(), offset, base); break;
      case T_INT   :   __ lwa(to_reg->as_register(), offset, base); break;
      case T_LONG  :   __ ld(to_reg->as_register_lo(), offset, base); break;
      case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lwz(to_reg->as_register(), offset, base);
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld(to_reg->as_register(), offset, base);
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lwz(to_reg->as_register(), offset, base);
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld(to_reg->as_register(), offset, base);
          }
          __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
          break;
        }
      case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  :  __ lbzx(to_reg->as_register(), base, disp);
                    __ extsb(to_reg->as_register(), to_reg->as_register()); break;
    case T_CHAR  :  __ lhzx(to_reg->as_register(), base, disp); break;
    case T_SHORT :  __ lhax(to_reg->as_register(), base, disp); break;
    case T_INT   :  __ lwax(to_reg->as_register(), base, disp); break;
    case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lwzx(to_reg->as_register(), base, disp);
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ldx(to_reg->as_register(), base, disp);
        }
        __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
        break;
      }
    case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
    case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(to_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    default      : ShouldNotReachHere();
  }
  return load_offset;
}


void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  Register src_reg = R0;
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_ADDRESS: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_OBJECT: {
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      jlong value = c->as_jlong_bits(); // Must not be narrowed to int (would truncate the constant).
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  int offset = -1;
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(base, info);
  }

  switch (c->type()) {
    case T_FLOAT: type = T_INT; // fall through
    case T_INT:
    case T_ADDRESS: {
      tmp = FrameMap::R0_opr;
      __ load_const_optimized(tmp->as_register(), c->as_jint_bits());
      break;
    }
    case T_DOUBLE: type = T_LONG; // fall through
    case T_LONG: {
      tmp = FrameMap::R0_long_opr;
      __ load_const_optimized(tmp->as_register_lo(), c->as_jlong_bits());
      break;
    }
    case T_OBJECT: {
      tmp = FrameMap::R0_opr;
      if (UseCompressedOops && !wide && c->as_jobject() != NULL) {
        AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
        __ lis(R0, oop_addr.value() >> 16); // Don't care about sign extend (will use stw).
        __ relocate(oop_addr.rspec(), /*compressed format*/ 1);
        __ ori(R0, R0, oop_addr.value() & 0xffff);
      } else {
        jobject2reg(c->as_jobject(), R0);
      }
      break;
    }
    default:
      Unimplemented();
  }

  // Handle either reg+reg or reg+disp address.
  if (addr->index()->is_valid()) {
    assert(addr->disp() == 0, "must be zero");
    offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
  } else {
    assert(Assembler::is_simm16(addr->disp()), "can't handle larger addresses");
    offset = store(tmp, base, addr->disp(), type, wide, false);
  }

  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    if (!needs_explicit_null_check) {
      add_debug_info_for_null_check(offset, info);
    }
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);
      break;
    }
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);  // Yes, as_jint ...
      break;
    }
    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), c->as_jlong(), R0);
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), to_reg->as_register());
      } else {
        jobject2reg_with_patching(to_reg->as_register(), info);
      }
      break;
    }

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        if (to_reg->is_single_fpu()) {
          address const_addr = __ float_constant(c->as_jfloat());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfsx(to_reg->as_float_reg(), R0);
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");
          __ load_const_optimized(to_reg->as_register(), jint_cast(c->as_jfloat()), R0);
        }
      }
      break;

    case T_DOUBLE:
      {
        if (to_reg->is_double_fpu()) {
          address const_addr = __ double_constant(c->as_jdouble());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfdx(to_reg->as_double_reg(), R0);
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ load_const_optimized(to_reg->as_register_lo(), jlong_cast(c->as_jdouble()), R0);
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Unimplemented(); return Address();
}


inline RegisterOrConstant index_or_disp(LIR_Address* addr) {
  if (addr->index()->is_illegal()) {
    return (RegisterOrConstant)(addr->disp());
  } else {
    return (RegisterOrConstant)(addr->index()->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  const Register tmp = R0;
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lwz(tmp, from.disp(), from.base());
      __ stw(tmp, to.disp(), to.base());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Unimplemented(); return Address();
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Unimplemented(); return Address();
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  // null check for large offsets in LIRGenerator::do_LoadField
  bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src, FILE_AND_LINE);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm16(disp_value)) {
      if (needs_patching) {
        __ load_const32(R0, 0); // patchable int
      } else {
        __ load_const_optimized(R0, disp_value);
      }
      disp_reg = R0;
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word())  {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = addr.disp() % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word())  {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = addr.disp() % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (is_reference_type(to_reg->type())) {
    __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  bool compress_oop = (is_reference_type(type)) && UseCompressedOops && !wide &&
                      CompressedOops::mode() != CompressedOops::UnscaledNarrowOop;
  bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
  bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src, FILE_AND_LINE);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (load_disp) {
      disp_reg = use_R29 ? R29_TOC : R0;
      if (needs_patching) {
        __ load_const32(disp_reg, 0); // patchable int
      } else {
        __ load_const_optimized(disp_reg, disp_value);
      }
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (compress_oop) {
    Register co = __ encode_heap_oop(R0, from_reg->as_register());
    from_reg = FrameMap::as_opr(co);
  }
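  // (encode_heap_oop returns whichever register ends up holding the narrow
  // oop, possibly the scratch R0, so from_reg is rebound to it for the store.)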

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (use_R29) {
    __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  const Register return_pc = R31;  // Must survive C-call to enable_stack_reserved_zone().
  const Register temp      = R12;

  // Pop the stack before the safepoint code.
  int frame_size = initial_frame_size_in_bytes();
  if (Assembler::is_simm(frame_size, 16)) {
    __ addi(R1_SP, R1_SP, frame_size);
  } else {
    __ pop_frame();
  }
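  // (A simm16 frame size can be popped with a single addi; larger frames go
  // through pop_frame, which reloads the caller's SP from the back chain.)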

  // Restore return pc relative to caller's sp.
  __ ld(return_pc, _abi0(lr), R1_SP);
  // Move return pc to LR.
  __ mtlr(return_pc);
1338 
1339   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
1340     __ reserved_stack_check(return_pc);
1341   }
1342 
1343   // We need to mark the code position where the load from the safepoint
1344   // polling page was emitted as relocInfo::poll_return_type here.
1345   if (!UseSIGTRAP) {
1346     code_stub->set_safepoint_offset(__ offset());
1347     __ relocate(relocInfo::poll_return_type);
1348   }
1349   __ safepoint_poll(*code_stub->entry(), temp, true /* at_return */, true /* in_nmethod */);
1350 
1351   // Return.
1352   __ blr();
1353 }
1354 
1355 
1356 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
1357   const Register poll_addr = tmp->as_register();
1358   __ ld(poll_addr, in_bytes(JavaThread::polling_page_offset()), R16_thread);
1359   if (info != NULL) {
1360     add_debug_info_for_branch(info);
1361   }
1362   int offset = __ offset();
1363   __ relocate(relocInfo::poll_type);
1364   __ load_from_polling_page(poll_addr);
1365 
1366   return offset;
1367 }
1368 
1369 
1370 void LIR_Assembler::emit_static_call_stub() {
1371   address call_pc = __ pc();
1372   address stub = __ start_a_stub(static_call_stub_size());
1373   if (stub == NULL) {
1374     bailout("static call stub overflow");
1375     return;
1376   }
1377 
1378   // For java_to_interp stubs we use R11_scratch1 as scratch register
1379   // and in call trampoline stubs we use R12_scratch2. This way we
1380   // can distinguish them (see is_NativeCallTrampolineStub_at()).
1381   const Register reg_scratch = R11_scratch1;
1382 
1383   // Create a static stub relocation which relates this stub
1384   // with the call instruction at insts_call_instruction_offset in the
1385   // instructions code-section.
1386   int start = __ offset();
1387   __ relocate(static_stub_Relocation::spec(call_pc));
1388 
1389   // Now, create the stub's code:
1390   // - load the TOC
1391   // - load the inline cache oop from the constant pool
1392   // - load the call target from the constant pool
1393   // - call
1394   __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
1395   AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
1396   bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);
1397 
1398   if (ReoptimizeCallSequences) {
1399     __ b64_patchable((address)-1, relocInfo::none);
1400   } else {
1401     AddressLiteral a((address)-1);
1402     success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
1403     __ mtctr(reg_scratch);
1404     __ bctr();
1405   }
1406   if (!success) {
1407     bailout("const section overflow");
1408     return;
1409   }
1410 
1411   assert(__ offset() - start <= static_call_stub_size(), "stub too big");
1412   __ end_a_stub();
1413 }
1414 
1415 
1416 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1417   bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
1418   if (opr1->is_single_fpu()) {
1419     __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
1420   } else if (opr1->is_double_fpu()) {
1421     __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
1422   } else if (opr1->is_single_cpu()) {
1423     if (opr2->is_constant()) {
1424       switch (opr2->as_constant_ptr()->type()) {
1425         case T_INT:
1426           {
1427             jint con = opr2->as_constant_ptr()->as_jint();
1428             if (unsigned_comp) {
1429               if (Assembler::is_uimm(con, 16)) {
1430                 __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
1431               } else {
1432                 __ load_const_optimized(R0, con);
1433                 __ cmplw(BOOL_RESULT, opr1->as_register(), R0);
1434               }
1435             } else {
1436               if (Assembler::is_simm(con, 16)) {
1437                 __ cmpwi(BOOL_RESULT, opr1->as_register(), con);
1438               } else {
1439                 __ load_const_optimized(R0, con);
1440                 __ cmpw(BOOL_RESULT, opr1->as_register(), R0);
1441               }
1442             }
1443           }
1444           break;
1445 
1446         case T_OBJECT:
1447           // There are only equal/notequal comparisons on objects.
1448           {
1449             assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
1450             jobject con = opr2->as_constant_ptr()->as_jobject();
1451             if (con == NULL) {
1452               __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
1453             } else {
1454               jobject2reg(con, R0);
1455               __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
1456             }
1457           }
1458           break;
1459 
1460         case T_METADATA:
1461           // For now, we only need comparison with NULL for metadata.
1462           {
1463             assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
1464             Metadata* p = opr2->as_constant_ptr()->as_metadata();
1465             if (p == NULL) {
1466               __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
1467             } else {
1468               ShouldNotReachHere();
1469             }
1470           }
1471           break;
1472 
1473         default:
1474           ShouldNotReachHere();
1475           break;
1476       }
1477     } else {
1478       assert(opr1->type() != T_ADDRESS && opr2->type() != T_ADDRESS, "currently unsupported");
1479       if (is_reference_type(opr1->type())) {
1480         // There are only equal/notequal comparisons on objects.
1481         assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
1482         __ cmpd(BOOL_RESULT, opr1->as_register(), opr2->as_register());
1483       } else {
1484         if (unsigned_comp) {
1485           __ cmplw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
1486         } else {
1487           __ cmpw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
1488         }
1489       }
1490     }
1491   } else if (opr1->is_double_cpu()) {
1492     if (opr2->is_constant()) {
1493       jlong con = opr2->as_constant_ptr()->as_jlong();
1494       if (unsigned_comp) {
1495         if (Assembler::is_uimm(con, 16)) {
1496           __ cmpldi(BOOL_RESULT, opr1->as_register_lo(), con);
1497         } else {
1498           __ load_const_optimized(R0, con);
1499           __ cmpld(BOOL_RESULT, opr1->as_register_lo(), R0);
1500         }
1501       } else {
1502         if (Assembler::is_simm(con, 16)) {
1503           __ cmpdi(BOOL_RESULT, opr1->as_register_lo(), con);
1504         } else {
1505           __ load_const_optimized(R0, con);
1506           __ cmpd(BOOL_RESULT, opr1->as_register_lo(), R0);
1507         }
1508       }
1509     } else if (opr2->is_register()) {
1510       if (unsigned_comp) {
1511         __ cmpld(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
1512       } else {
1513         __ cmpd(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
1514       }
1515     } else {
1516       ShouldNotReachHere();
1517     }
1518   } else {
1519     ShouldNotReachHere();
1520   }
1521 }
1522 
1523 
1524 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1525   const Register Rdst = dst->as_register();
1526   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1527     bool is_unordered_less = (code == lir_ucmp_fd2i);
1528     if (left->is_single_fpu()) {
1529       __ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
1530     } else if (left->is_double_fpu()) {
1531       __ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
1532     } else {
1533       ShouldNotReachHere();
1534     }
1535     __ set_cmpu3(Rdst, is_unordered_less); // Sets -1/0/+1; an unordered result yields is_unordered_less ? -1 : 1.
1536   } else if (code == lir_cmp_l2i) {
1537     __ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
1538     __ set_cmp3(Rdst);  // set result as follows: <: -1, =: 0, >: 1
1539   } else {
1540     ShouldNotReachHere();
1541   }
1542 }
1543 
1544 
1545 inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
1546   if (src->is_constant()) {
1547     lasm->const2reg(src, dst, lir_patch_none, NULL);
1548   } else if (src->is_register()) {
1549     lasm->reg2reg(src, dst);
1550   } else if (src->is_stack()) {
1551     lasm->stack2reg(src, dst, dst->type());
1552   } else {
1553     ShouldNotReachHere();
1554   }
1555 }
1556 
1557 
1558 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1559   if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
1560     load_to_reg(this, opr1, result); // Condition doesn't matter.
1561     return;
1562   }
1563 
1564   bool positive = false;
1565   Assembler::Condition cond = Assembler::equal;
1566   switch (condition) {
1567     case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; break;
1568     case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; break;
1569     case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
1570     case lir_cond_belowEqual:
1571     case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
1572     case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
1573     case lir_cond_aboveEqual:
1574     case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
1575     default:                    ShouldNotReachHere();
1576   }
1577 
1578   // Try to use isel on >=Power7.
1579   if (VM_Version::has_isel() && result->is_cpu_register()) {
1580     bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register();
1581     const Register result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();
1582 
1583     // We can use result_reg to load one operand if it is not already in a register.
1584     Register first  = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
1585              second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;
1586 
1587     if (first != second) {
1588       if (!o1_is_reg) {
1589         load_to_reg(this, opr1, result);
1590       }
1591 
1592       if (!o2_is_reg) {
1593         load_to_reg(this, opr2, result);
1594       }
1595 
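           // isel selects 'first' when the condition bit is set and 'second' otherwise;
           // '!positive' inverts the test for the conditions encoded via their opposite above.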
1596       __ isel(result_reg, BOOL_RESULT, cond, !positive, first, second);
1597       return;
1598     }
1599   } // isel
1600 
1601   load_to_reg(this, opr1, result);
1602 
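       // Fallback without isel: load opr1, then branch over the opr2 load when the condition selects opr1.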
1603   Label skip;
1604   int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
1605   int bi = Assembler::bi0(BOOL_RESULT, cond);
1606   __ bc(bo, bi, skip);
1607 
1608   load_to_reg(this, opr2, result);
1609   __ bind(skip);
1610 }
1611 
1612 
1613 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
1614                              CodeEmitInfo* info, bool pop_fpu_stack) {
1615   assert(info == NULL, "unused on this code path");
1616   assert(left->is_register(), "wrong items state");
1617   assert(dest->is_register(), "wrong items state");
1618 
1619   if (right->is_register()) {
1620     if (dest->is_float_kind()) {
1621 
1622       FloatRegister lreg, rreg, res;
1623       if (right->is_single_fpu()) {
1624         lreg = left->as_float_reg();
1625         rreg = right->as_float_reg();
1626         res  = dest->as_float_reg();
1627         switch (code) {
1628           case lir_add: __ fadds(res, lreg, rreg); break;
1629           case lir_sub: __ fsubs(res, lreg, rreg); break;
1630           case lir_mul: __ fmuls(res, lreg, rreg); break;
1631           case lir_div: __ fdivs(res, lreg, rreg); break;
1632           default: ShouldNotReachHere();
1633         }
1634       } else {
1635         lreg = left->as_double_reg();
1636         rreg = right->as_double_reg();
1637         res  = dest->as_double_reg();
1638         switch (code) {
1639           case lir_add: __ fadd(res, lreg, rreg); break;
1640           case lir_sub: __ fsub(res, lreg, rreg); break;
1641           case lir_mul: __ fmul(res, lreg, rreg); break;
1642           case lir_div: __ fdiv(res, lreg, rreg); break;
1643           default: ShouldNotReachHere();
1644         }
1645       }
1646 
1647     } else if (dest->is_double_cpu()) {
1648 
1649       Register dst_lo = dest->as_register_lo();
1650       Register op1_lo = left->as_pointer_register();
1651       Register op2_lo = right->as_pointer_register();
1652 
1653       switch (code) {
1654         case lir_add: __ add(dst_lo, op1_lo, op2_lo); break;
1655         case lir_sub: __ sub(dst_lo, op1_lo, op2_lo); break;
1656         case lir_mul: __ mulld(dst_lo, op1_lo, op2_lo); break;
1657         default: ShouldNotReachHere();
1658       }
1659     } else {
1660       assert(right->is_single_cpu(), "just checking");
1661 
1662       Register lreg = left->as_register();
1663       Register res  = dest->as_register();
1664       Register rreg = right->as_register();
1665       switch (code) {
1666         case lir_add:  __ add  (res, lreg, rreg); break;
1667         case lir_sub:  __ sub  (res, lreg, rreg); break;
1668         case lir_mul:  __ mullw(res, lreg, rreg); break;
1669         default: ShouldNotReachHere();
1670       }
1671     }
1672   } else {
1673     assert(right->is_constant(), "must be constant");
1674 
1675     if (dest->is_single_cpu()) {
1676       Register lreg = left->as_register();
1677       Register res  = dest->as_register();
1678       int    simm16 = right->as_constant_ptr()->as_jint();
1679 
1680       switch (code) {
1681         case lir_sub:  assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
1682                        simm16 = -simm16;
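             // Fall through to lir_add: subtraction is performed as addition of the negated constant.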
1683         case lir_add:  if (res == lreg && simm16 == 0) break;
1684                        __ addi(res, lreg, simm16); break;
1685         case lir_mul:  if (res == lreg && simm16 == 1) break;
1686                        __ mulli(res, lreg, simm16); break;
1687         default: ShouldNotReachHere();
1688       }
1689     } else {
1690       Register lreg = left->as_pointer_register();
1691       Register res  = dest->as_register_lo();
1692       long con = right->as_constant_ptr()->as_jlong();
1693       assert(Assembler::is_simm16(con), "must be simm16");
1694 
1695       switch (code) {
1696         case lir_sub:  assert(Assembler::is_simm16(-con), "cannot encode");  // see do_ArithmeticOp_Long
1697                        con = -con;
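             // Fall through to lir_add: subtraction is performed as addition of the negated constant.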
1698         case lir_add:  if (res == lreg && con == 0) break;
1699                        __ addi(res, lreg, (int)con); break;
1700         case lir_mul:  if (res == lreg && con == 1) break;
1701                        __ mulli(res, lreg, (int)con); break;
1702         default: ShouldNotReachHere();
1703       }
1704     }
1705   }
1706 }
1707 
1708 
1709 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
1710   switch (code) {
1711     case lir_sqrt: {
1712       __ fsqrt(dest->as_double_reg(), value->as_double_reg());
1713       break;
1714     }
1715     case lir_abs: {
1716       __ fabs(dest->as_double_reg(), value->as_double_reg());
1717       break;
1718     }
1719     default: {
1720       ShouldNotReachHere();
1721       break;
1722     }
1723   }
1724 }
1725 
1726 
1727 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
1728   if (right->is_constant()) { // see do_LogicOp
1729     long uimm;
1730     Register d, l;
1731     if (dest->is_single_cpu()) {
1732       uimm = right->as_constant_ptr()->as_jint();
1733       d = dest->as_register();
1734       l = left->as_register();
1735     } else {
1736       uimm = right->as_constant_ptr()->as_jlong();
1737       d = dest->as_register_lo();
1738       l = left->as_register_lo();
1739     }
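         // uimms/uimmss hold the constant shifted right by 16 and 32 bits; they
         // determine which 16-bit immediate form (andi_/andis_ etc.) can encode it.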
1740     long uimms  = (unsigned long)uimm >> 16,
1741          uimmss = (unsigned long)uimm >> 32;
1742 
1743     switch (code) {
1744       case lir_logic_and:
1745         if (uimmss != 0 || (uimms != 0 && (uimm & 0xFFFF) != 0) || is_power_of_2(uimm)) {
1746           __ andi(d, l, uimm); // Handles the cases a single andi_/andis_ cannot encode.
1747         } else if (uimms != 0) { __ andis_(d, l, uimms); }
1748         else { __ andi_(d, l, uimm); }
1749         break;
1750 
1751       case lir_logic_or:
1752         if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ oris(d, l, uimms); }
1753         else { __ ori(d, l, uimm); }
1754         break;
1755 
1756       case lir_logic_xor:
1757         if (uimm == -1) { __ nand(d, l, l); } // special case
1758         else if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ xoris(d, l, uimms); }
1759         else { __ xori(d, l, uimm); }
1760         break;
1761 
1762       default: ShouldNotReachHere();
1763     }
1764   } else {
1765     assert(right->is_register(), "right should be in register");
1766 
1767     if (dest->is_single_cpu()) {
1768       switch (code) {
1769         case lir_logic_and: __ andr(dest->as_register(), left->as_register(), right->as_register()); break;
1770         case lir_logic_or:  __ orr (dest->as_register(), left->as_register(), right->as_register()); break;
1771         case lir_logic_xor: __ xorr(dest->as_register(), left->as_register(), right->as_register()); break;
1772         default: ShouldNotReachHere();
1773       }
1774     } else {
1775       Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
1776                                                                         left->as_register_lo();
1777       Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
1778                                                                           right->as_register_lo();
1779 
1780       switch (code) {
1781         case lir_logic_and: __ andr(dest->as_register_lo(), l, r); break;
1782         case lir_logic_or:  __ orr (dest->as_register_lo(), l, r); break;
1783         case lir_logic_xor: __ xorr(dest->as_register_lo(), l, r); break;
1784         default: ShouldNotReachHere();
1785       }
1786     }
1787   }
1788 }
1789 
1790 
1791 int LIR_Assembler::shift_amount(BasicType t) {
1792   int elem_size = type2aelembytes(t);
1793   switch (elem_size) {
1794     case 1 : return 0;
1795     case 2 : return 1;
1796     case 4 : return 2;
1797     case 8 : return 3;
1798   }
1799   ShouldNotReachHere();
1800   return -1;
1801 }
1802 
1803 
1804 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1805   info->add_register_oop(exceptionOop);
1806 
1807   // Reuse the debug info from the safepoint poll for the throw op itself.
1808   address pc_for_athrow = __ pc();
1809   int pc_for_athrow_offset = __ offset();
1810   //RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
1811   //__ relocate(rspec);
1812   //__ load_const(exceptionPC->as_register(), pc_for_athrow, R0);
1813   __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true);
1814   add_call_info(pc_for_athrow_offset, info); // for exception handler
1815 
1816   address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id
1817                                                                    : Runtime1::handle_exception_nofpu_id);
1818   //__ load_const_optimized(R0, stub);
1819   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
1820   __ mtctr(R0);
1821   __ bctr();
1822 }
1823 
1824 
1825 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
1826   // Note: Not used with EnableDebuggingOnDemand.
1827   assert(exceptionOop->as_register() == R3, "should match");
1828   __ b(_unwind_handler_entry);
1829 }
1830 
1831 
1832 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
1833   Register src = op->src()->as_register();
1834   Register dst = op->dst()->as_register();
1835   Register src_pos = op->src_pos()->as_register();
1836   Register dst_pos = op->dst_pos()->as_register();
1837   Register length  = op->length()->as_register();
1838   Register tmp = op->tmp()->as_register();
1839   Register tmp2 = R0;
1840 
1841   int flags = op->flags();
1842   ciArrayKlass* default_type = op->expected_type();
1843   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
1844   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
1845 
1846   // Set up the arraycopy stub information.
1847   ArrayCopyStub* stub = op->stub();
1848   const int frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame.
1849 
1850   // Always use the stub if no type information is available. It's OK if
1851   // the known type isn't loaded, since the code sanity-checks it in debug
1852   // mode and the type isn't required when we know the exact type. Also
1853   // check that the type is an array type.
1854   if (op->expected_type() == NULL) {
1855     assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
1856            length->is_nonvolatile(), "must preserve");
1857     address copyfunc_addr = StubRoutines::generic_arraycopy();
1858     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
1859 
1860     // Three of the five arguments are ints; sign-extend them to 64 bits.
1861     __ mr(R3_ARG1, src);
1862     __ extsw(R4_ARG2, src_pos);
1863     __ mr(R5_ARG3, dst);
1864     __ extsw(R6_ARG4, dst_pos);
1865     __ extsw(R7_ARG5, length);
1866 
1867 #ifndef PRODUCT
1868     if (PrintC1Statistics) {
1869       address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
1870       int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
1871       __ lwz(R11_scratch1, simm16_offs, tmp);
1872       __ addi(R11_scratch1, R11_scratch1, 1);
1873       __ stw(R11_scratch1, simm16_offs, tmp);
1874     }
1875 #endif
1876     __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);
1877 
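       // Generic arraycopy stub convention: R3_RET is 0 on success, otherwise
       // ~(number of elements copied). Recover the copied count and advance the
       // operands so the slow path copies only the remaining elements.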
1878     __ nand(tmp, R3_RET, R3_RET);
1879     __ subf(length, tmp, length);
1880     __ add(src_pos, tmp, src_pos);
1881     __ add(dst_pos, tmp, dst_pos);
1882 
1883     __ cmpwi(CCR0, R3_RET, 0);
1884     __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
1885     __ bind(*stub->continuation());
1886     return;
1887   }
1888 
1889   assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
1890   Label cont, slow, copyfunc;
1891 
1892   bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check |
1893                                         LIR_OpArrayCopy::dst_null_check |
1894                                         LIR_OpArrayCopy::src_pos_positive_check |
1895                                         LIR_OpArrayCopy::dst_pos_positive_check |
1896                                         LIR_OpArrayCopy::length_positive_check);
1897 
1898   // Use only one conditional branch for simple checks.
1899   if (simple_check_flag_set) {
1900     ConditionRegister combined_check = CCR1, tmp_check = CCR1;
1901 
1902     // Make sure src and dst are non-null.
1903     if (flags & LIR_OpArrayCopy::src_null_check) {
1904       __ cmpdi(combined_check, src, 0);
1905       tmp_check = CCR0;
1906     }
1907 
1908     if (flags & LIR_OpArrayCopy::dst_null_check) {
1909       __ cmpdi(tmp_check, dst, 0);
1910       if (tmp_check != combined_check) {
1911         __ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal);
1912       }
1913       tmp_check = CCR0;
1914     }
1915 
1916     // Clear combined_check.eq if not already used.
1917     if (tmp_check == combined_check) {
1918       __ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal);
1919       tmp_check = CCR0;
1920     }
1921 
1922     if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
1923       // Test src_pos register.
1924       __ cmpwi(tmp_check, src_pos, 0);
1925       __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
1926     }
1927 
1928     if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
1929       // Test dst_pos register.
1930       __ cmpwi(tmp_check, dst_pos, 0);
1931       __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
1932     }
1933 
1934     if (flags & LIR_OpArrayCopy::length_positive_check) {
1935       // Make sure length isn't negative.
1936       __ cmpwi(tmp_check, length, 0);
1937       __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
1938     }
1939 
1940     __ beq(combined_check, slow);
1941   }
1942 
1943   // If the compiler was not able to prove that the exact type of the source or the destination
1944   // of the arraycopy is an array type, check at runtime whether the source or the destination is
1945   // an instance type.
1946   if (flags & LIR_OpArrayCopy::type_check) {
1947     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
1948       __ load_klass(tmp, dst);
1949       __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
1950       __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
1951       __ bge(CCR0, slow);
1952     }
1953 
1954     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
1955       __ load_klass(tmp, src);
1956       __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
1957       __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
1958       __ bge(CCR0, slow);
1959     }
1960   }
1961 
1962   // The upper 32 bits must be zero.
1963   __ extsw(length, length);
1964 
1965   __ extsw(src_pos, src_pos);
1966   if (flags & LIR_OpArrayCopy::src_range_check) {
1967     __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
1968     __ add(tmp, length, src_pos);
1969     __ cmpld(CCR0, tmp2, tmp);
1970     __ ble(CCR0, slow);
1971   }
1972 
1973   __ extsw(dst_pos, dst_pos);
1974   if (flags & LIR_OpArrayCopy::dst_range_check) {
1975     __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
1976     __ add(tmp, length, dst_pos);
1977     __ cmpld(CCR0, tmp2, tmp);
1978     __ ble(CCR0, slow);
1979   }
1980 
1981   int shift = shift_amount(basic_type);
1982 
1983   if (!(flags & LIR_OpArrayCopy::type_check)) {
1984     __ b(cont);
1985   } else {
1986     // We don't know whether the array types are compatible.
1987     if (basic_type != T_OBJECT) {
1988       // Simple test for basic type arrays.
1989       if (UseCompressedClassPointers) {
1990         // No need to decode; comparing the compressed values suffices.
1991         __ lwz(tmp, oopDesc::klass_offset_in_bytes(), src);
1992         __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
1993         __ cmpw(CCR0, tmp, tmp2);
1994       } else {
1995         __ ld(tmp, oopDesc::klass_offset_in_bytes(), src);
1996         __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
1997         __ cmpd(CCR0, tmp, tmp2);
1998       }
1999       __ beq(CCR0, cont);
2000     } else {
2001       // For object arrays, if src is a sub class of dst then we can
2002       // safely do the copy.
2003       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2004 
2005       const Register sub_klass = R5, super_klass = R4; // like CheckCast/InstanceOf
2006       assert_different_registers(tmp, tmp2, sub_klass, super_klass);
2007 
2008       __ load_klass(sub_klass, src);
2009       __ load_klass(super_klass, dst);
2010 
2011       __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
2012                                        &cont, copyfunc_addr != NULL ? &copyfunc : &slow, NULL);
2013 
2014       address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
2015       //__ load_const_optimized(tmp, slow_stc, tmp2);
2016       __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
2017       __ mtctr(tmp);
2018       __ bctrl(); // sets CR0
2019       __ beq(CCR0, cont);
2020 
2021       if (copyfunc_addr != NULL) { // Use stub if available.
2022         __ bind(copyfunc);
2023         // Src is not a subclass of dst, so we have to do a
2024         // per-element check.
2025         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2026         if ((flags & mask) != mask) {
2027           assert(flags & mask, "one of the two should be known to be an object array");
2028 
2029           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2030             __ load_klass(tmp, src);
2031           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2032             __ load_klass(tmp, dst);
2033           }
2034 
2035           __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
2036 
2037           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2038           __ load_const_optimized(tmp, objArray_lh);
2039           __ cmpw(CCR0, tmp, tmp2);
2040           __ bne(CCR0, slow);
2041         }
2042 
2043         Register src_ptr = R3_ARG1;
2044         Register dst_ptr = R4_ARG2;
2045         Register len     = R5_ARG3;
2046         Register chk_off = R6_ARG4;
2047         Register super_k = R7_ARG5;
2048 
2049         __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2050         __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2051         if (shift == 0) {
2052           __ add(src_ptr, src_pos, src_ptr);
2053           __ add(dst_ptr, dst_pos, dst_ptr);
2054         } else {
2055           __ sldi(tmp, src_pos, shift);
2056           __ sldi(tmp2, dst_pos, shift);
2057           __ add(src_ptr, tmp, src_ptr);
2058           __ add(dst_ptr, tmp2, dst_ptr);
2059         }
2060 
2061         __ load_klass(tmp, dst);
2062         __ mr(len, length);
2063 
2064         int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2065         __ ld(super_k, ek_offset, tmp);
2066 
2067         int sco_offset = in_bytes(Klass::super_check_offset_offset());
2068         __ lwz(chk_off, sco_offset, super_k);
2069 
2070         __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);
2071 
2072 #ifndef PRODUCT
2073         if (PrintC1Statistics) {
2074           Label failed;
2075           __ cmpwi(CCR0, R3_RET, 0);
2076           __ bne(CCR0, failed);
2077           address counter = (address)&Runtime1::_arraycopy_checkcast_cnt;
2078           int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
2079           __ lwz(R11_scratch1, simm16_offs, tmp);
2080           __ addi(R11_scratch1, R11_scratch1, 1);
2081           __ stw(R11_scratch1, simm16_offs, tmp);
2082           __ bind(failed);
2083         }
2084 #endif
2085 
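             // Checkcast arraycopy stub convention: R3_RET is 0 on success, otherwise
             // ~(number of elements copied); adjust the ranges before taking the slow path.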
2086         __ nand(tmp, R3_RET, R3_RET);
2087         __ cmpwi(CCR0, R3_RET, 0);
2088         __ beq(CCR0, *stub->continuation());
2089 
2090 #ifndef PRODUCT
2091         if (PrintC1Statistics) {
2092           address counter = (address)&Runtime1::_arraycopy_checkcast_attempt_cnt;
2093           int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
2094           __ lwz(R11_scratch1, simm16_offs, tmp);
2095           __ addi(R11_scratch1, R11_scratch1, 1);
2096           __ stw(R11_scratch1, simm16_offs, tmp);
2097         }
2098 #endif
2099 
2100         __ subf(length, tmp, length);
2101         __ add(src_pos, tmp, src_pos);
2102         __ add(dst_pos, tmp, dst_pos);
2103       }
2104     }
2105   }
2106   __ bind(slow);
2107   __ b(*stub->entry());
2108   __ bind(cont);
2109 
2110 #ifdef ASSERT
2111   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2112     // Sanity check the known type with the incoming class. For the
2113     // primitive case the types must match exactly, with src.klass and
2114     // dst.klass each matching the default type. For the object array
2115     // case, if no type check is needed, then either the dst type is
2116     // exactly the expected type and the src type is a subtype which we
2117     // can't check, or src is the same array as dst but not necessarily
2118     // exactly of type default_type.
2119     Label known_ok, halt;
2120     metadata2reg(op->expected_type()->constant_encoding(), tmp);
2121     if (UseCompressedClassPointers) {
2122       // Tmp holds the default type. It currently comes uncompressed after the
2123       // load of a constant, so encode it.
2124       __ encode_klass_not_null(tmp);
2125       // Load the raw value of the dst klass, since we will be comparing
2126       // uncompressed values directly.
2127       __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
2128       __ cmpw(CCR0, tmp, tmp2);
2129       if (basic_type != T_OBJECT) {
2130         __ bne(CCR0, halt);
2131         // Load the raw value of the src klass.
2132         __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), src);
2133         __ cmpw(CCR0, tmp, tmp2);
2134         __ beq(CCR0, known_ok);
2135       } else {
2136         __ beq(CCR0, known_ok);
2137         __ cmpw(CCR0, src, dst);
2138         __ beq(CCR0, known_ok);
2139       }
2140     } else {
2141       __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
2142       __ cmpd(CCR0, tmp, tmp2);
2143       if (basic_type != T_OBJECT) {
2144         __ bne(CCR0, halt);
2145         // Load the raw value of the src klass.
2146         __ ld(tmp2, oopDesc::klass_offset_in_bytes(), src);
2147         __ cmpd(CCR0, tmp, tmp2);
2148         __ beq(CCR0, known_ok);
2149       } else {
2150         __ beq(CCR0, known_ok);
2151         __ cmpd(CCR0, src, dst);
2152         __ beq(CCR0, known_ok);
2153       }
2154     }
2155     __ bind(halt);
2156     __ stop("incorrect type information in arraycopy");
2157     __ bind(known_ok);
2158   }
2159 #endif
2160 
2161 #ifndef PRODUCT
2162   if (PrintC1Statistics) {
2163     address counter = Runtime1::arraycopy_count_address(basic_type);
2164     int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
2165     __ lwz(R11_scratch1, simm16_offs, tmp);
2166     __ addi(R11_scratch1, R11_scratch1, 1);
2167     __ stw(R11_scratch1, simm16_offs, tmp);
2168   }
2169 #endif
2170 
2171   Register src_ptr = R3_ARG1;
2172   Register dst_ptr = R4_ARG2;
2173   Register len     = R5_ARG3;
2174 
2175   __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2176   __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2177   if (shift == 0) {
2178     __ add(src_ptr, src_pos, src_ptr);
2179     __ add(dst_ptr, dst_pos, dst_ptr);
2180   } else {
2181     __ sldi(tmp, src_pos, shift);
2182     __ sldi(tmp2, dst_pos, shift);
2183     __ add(src_ptr, tmp, src_ptr);
2184     __ add(dst_ptr, tmp2, dst_ptr);
2185   }
2186 
2187   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2188   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2189   const char *name;
2190   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2191 
2192   // Arraycopy stubs take a length in number of elements, so don't scale it.
2193   __ mr(len, length);
2194   __ call_c_with_frame_resize(entry, /*stub does not need resized frame*/ 0);
2195 
2196   __ bind(*stub->continuation());
2197 }
2198 
2199 
2200 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2201   if (dest->is_single_cpu()) {
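         // Mask the shift count to 5 bits, matching Java's int shift semantics (count & 0x1F).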
2202     __ rldicl(tmp->as_register(), count->as_register(), 0, 64-5);
2203 #ifdef _LP64
2204     if (left->type() == T_OBJECT) {
2205       switch (code) {
2206         case lir_shl:  __ sld(dest->as_register(), left->as_register(), tmp->as_register()); break;
2207         case lir_shr:  __ srad(dest->as_register(), left->as_register(), tmp->as_register()); break;
2208         case lir_ushr: __ srd(dest->as_register(), left->as_register(), tmp->as_register()); break;
2209         default: ShouldNotReachHere();
2210       }
2211     } else
2212 #endif
2213       switch (code) {
2214         case lir_shl:  __ slw(dest->as_register(), left->as_register(), tmp->as_register()); break;
2215         case lir_shr:  __ sraw(dest->as_register(), left->as_register(), tmp->as_register()); break;
2216         case lir_ushr: __ srw(dest->as_register(), left->as_register(), tmp->as_register()); break;
2217         default: ShouldNotReachHere();
2218       }
2219   } else {
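         // Mask the shift count to 6 bits, matching Java's long shift semantics (count & 0x3F).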
2220     __ rldicl(tmp->as_register(), count->as_register(), 0, 64-6);
2221     switch (code) {
2222       case lir_shl:  __ sld(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
2223       case lir_shr:  __ srad(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
2224       case lir_ushr: __ srd(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
2225       default: ShouldNotReachHere();
2226     }
2227   }
2228 }
2229 
2230 
2231 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2232 #ifdef _LP64
2233   if (left->type() == T_OBJECT) {
2234     count = count & 63;  // Shouldn't shift by more than the bit width of intptr_t.
2235     if (count == 0) { __ mr_if_needed(dest->as_register_lo(), left->as_register()); }
2236     else {
2237       switch (code) {
2238         case lir_shl:  __ sldi(dest->as_register_lo(), left->as_register(), count); break;
2239         case lir_shr:  __ sradi(dest->as_register_lo(), left->as_register(), count); break;
2240         case lir_ushr: __ srdi(dest->as_register_lo(), left->as_register(), count); break;
2241         default: ShouldNotReachHere();
2242       }
2243     }
2244     return;
2245   }
2246 #endif
2247 
2248   if (dest->is_single_cpu()) {
2249     count = count & 0x1F; // Java spec
2250     if (count == 0) { __ mr_if_needed(dest->as_register(), left->as_register()); }
2251     else {
2252       switch (code) {
2253         case lir_shl:  __ slwi(dest->as_register(), left->as_register(), count); break;
2254         case lir_shr:  __ srawi(dest->as_register(), left->as_register(), count); break;
2255         case lir_ushr: __ srwi(dest->as_register(), left->as_register(), count); break;
2256         default: ShouldNotReachHere();
2257       }
2258     }
2259   } else if (dest->is_double_cpu()) {
2260     count = count & 63; // Java spec
2261     if (count == 0) { __ mr_if_needed(dest->as_pointer_register(), left->as_pointer_register()); }
2262     else {
2263       switch (code) {
2264         case lir_shl:  __ sldi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
2265         case lir_shr:  __ sradi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
2266         case lir_ushr: __ srdi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
2267         default: ShouldNotReachHere();
2268       }
2269     }
2270   } else {
2271     ShouldNotReachHere();
2272   }
2273 }
2274 
2275 
2276 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2277   if (op->init_check()) {
2278     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2279       explicit_null_check(op->klass()->as_register(), op->stub()->info());
2280     } else {
2281       add_debug_info_for_null_check_here(op->stub()->info());
2282     }
2283     __ lbz(op->tmp1()->as_register(),
2284            in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
2285     __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
2286     __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
2287   }
2288   __ allocate_object(op->obj()->as_register(),
2289                      op->tmp1()->as_register(),
2290                      op->tmp2()->as_register(),
2291                      op->tmp3()->as_register(),
2292                      op->header_size(),
2293                      op->object_size(),
2294                      op->klass()->as_register(),
2295                      *op->stub()->entry());
2296 
2297   __ bind(*op->stub()->continuation());
2298   __ verify_oop(op->obj()->as_register(), FILE_AND_LINE);
2299 }
2300 
2301 
2302 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
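       // The incoming length is a 32-bit int; sign-extend it before it is used in 64-bit arithmetic.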
2303   LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
2304   if (UseSlowPath ||
2305       (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
2306       (!UseFastNewTypeArray   && (!is_reference_type(op->type())))) {
2307     __ b(*op->stub()->entry());
2308   } else {
2309     __ allocate_array(op->obj()->as_register(),
2310                       op->len()->as_register(),
2311                       op->tmp1()->as_register(),
2312                       op->tmp2()->as_register(),
2313                       op->tmp3()->as_register(),
2314                       arrayOopDesc::header_size(op->type()),
2315                       type2aelembytes(op->type()),
2316                       op->klass()->as_register(),
2317                       *op->stub()->entry());
2318   }
2319   __ bind(*op->stub()->continuation());
2320 }
2321 
2322 
2323 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
2324                                         ciMethodData *md, ciProfileData *data,
2325                                         Register recv, Register tmp1, Label* update_done) {
2326   uint i;
2327   for (i = 0; i < VirtualCallData::row_limit(); i++) {
2328     Label next_test;
2329     // See if the receiver is receiver[i].
2330     __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
2331     __ verify_klass_ptr(tmp1);
2332     __ cmpd(CCR0, recv, tmp1);
2333     __ bne(CCR0, next_test);
2334 
2335     __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2336     __ addi(tmp1, tmp1, DataLayout::counter_increment);
2337     __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2338     __ b(*update_done);
2339 
2340     __ bind(next_test);
2341   }
2342 
2343   // Didn't find receiver; find next empty slot and fill it in.
2344   for (i = 0; i < VirtualCallData::row_limit(); i++) {
2345     Label next_test;
2346     __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
2347     __ cmpdi(CCR0, tmp1, 0);
2348     __ bne(CCR0, next_test);
2349     __ li(tmp1, DataLayout::counter_increment);
2350     __ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
2351     __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2352     __ b(*update_done);
2353 
2354     __ bind(next_test);
2355   }
2356 }
2357 
2358 
2359 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
2360                                     ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
2361   md = method->method_data_or_null();
2362   assert(md != NULL, "Sanity");
2363   data = md->bci_to_data(bci);
2364   assert(data != NULL,       "need data for checkcast");
2365   assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2366   if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2367     // The offset is large so bias the mdo by the base of the slot so
2368     // that the ld can use simm16s to reference the slots of the data.
2369     mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2370   }
2371 }
2372 
2373 
2374 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2375   const Register obj = op->object()->as_register(); // Needs to live in this register at safepoint (patching stub).
2376   Register k_RInfo = op->tmp1()->as_register();
2377   Register klass_RInfo = op->tmp2()->as_register();
2378   Register Rtmp1 = op->tmp3()->as_register();
2379   Register dst = op->result_opr()->as_register();
2380   ciKlass* k = op->klass();
2381   bool should_profile = op->should_profile();
2382   // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be the same as one of the temps.
2383   bool reg_conflict = false;
2384   if (obj == k_RInfo) {
2385     k_RInfo = dst;
2386     reg_conflict = true;
2387   } else if (obj == klass_RInfo) {
2388     klass_RInfo = dst;
2389     reg_conflict = true;
2390   } else if (obj == Rtmp1) {
2391     Rtmp1 = dst;
2392     reg_conflict = true;
2393   }
2394   assert_different_registers(obj, k_RInfo, klass_RInfo, Rtmp1);
2395 
2396   __ cmpdi(CCR0, obj, 0);
2397 
2398   ciMethodData* md = NULL;
2399   ciProfileData* data = NULL;
2400   int mdo_offset_bias = 0;
2401   if (should_profile) {
2402     ciMethod* method = op->profiled_method();
2403     assert(method != NULL, "Should have method");
2404     setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2405 
2406     Register mdo      = k_RInfo;
2407     Register data_val = Rtmp1;
2408     Label not_null;
2409     __ bne(CCR0, not_null);
2410     metadata2reg(md->constant_encoding(), mdo);
2411     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2412     __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2413     __ ori(data_val, data_val, BitData::null_seen_byte_constant());
2414     __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2415     __ b(*obj_is_null);
2416     __ bind(not_null);
2417   } else {
2418     __ beq(CCR0, *obj_is_null);
2419   }
2420 
2421   // get object class
2422   __ load_klass(klass_RInfo, obj);
2423 
2424   if (k->is_loaded()) {
2425     metadata2reg(k->constant_encoding(), k_RInfo);
2426   } else {
2427     klass2reg_with_patching(k_RInfo, op->info_for_patch());
2428   }
2429 
2430   Label profile_cast_failure, failure_restore_obj, profile_cast_success;
2431   Label *failure_target = should_profile ? &profile_cast_failure : failure;
2432   Label *success_target = should_profile ? &profile_cast_success : success;
2433 
2434   if (op->fast_check()) {
2435     assert_different_registers(klass_RInfo, k_RInfo);
2436     __ cmpd(CCR0, k_RInfo, klass_RInfo);
2437     if (should_profile) {
2438       __ bne(CCR0, *failure_target);
2439       // Fall through to success case.
2440     } else {
2441       __ beq(CCR0, *success);
2442       // Fall through to failure case.
2443     }
2444   } else {
2445     bool need_slow_path = true;
2446     if (k->is_loaded()) {
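           // If the super check can use a statically known offset (rather than the
           // secondary-super cache), the fast path is decisive and no slow call is needed.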
2447       if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
2448         need_slow_path = false;
2449       }
2450       // Perform the fast part of the checking logic.
2451       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL),
2452                                        failure_target, NULL, RegisterOrConstant(k->super_check_offset()));
2453     } else {
2454       // Perform the fast part of the checking logic.
2455       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target);
2456     }
2457     if (!need_slow_path) {
2458       if (!should_profile) { __ b(*success); }
2459     } else {
2460       // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
2461       address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
2462       // Stub needs fixed registers (tmp1-3).
2463       Register original_k_RInfo = op->tmp1()->as_register();
2464       Register original_klass_RInfo = op->tmp2()->as_register();
2465       Register original_Rtmp1 = op->tmp3()->as_register();
2466       bool keep_obj_alive = reg_conflict && (op->code() == lir_checkcast);
2467       bool keep_klass_RInfo_alive = (obj == original_klass_RInfo) && should_profile;
2468       if (keep_obj_alive && (obj != original_Rtmp1)) { __ mr(R0, obj); }
2469       __ mr_if_needed(original_k_RInfo, k_RInfo);
2470       __ mr_if_needed(original_klass_RInfo, klass_RInfo);
2471       if (keep_obj_alive) { __ mr(dst, (obj == original_Rtmp1) ? obj : R0); }
2472       //__ load_const_optimized(original_Rtmp1, entry, R0);
2473       __ calculate_address_from_global_toc(original_Rtmp1, entry, true, true, false);
2474       __ mtctr(original_Rtmp1);
2475       __ bctrl(); // sets CR0
2476       if (keep_obj_alive) {
2477         if (keep_klass_RInfo_alive) { __ mr(R0, obj); }
2478         __ mr(obj, dst);
2479       }
2480       if (should_profile) {
2481         __ bne(CCR0, *failure_target);
2482         if (keep_klass_RInfo_alive) { __ mr(klass_RInfo, keep_obj_alive ? R0 : obj); }
2483         // Fall through to success case.
2484       } else {
2485         __ beq(CCR0, *success);
2486         // Fall through to failure case.
2487       }
2488     }
2489   }
2490 
2491   if (should_profile) {
2492     Register mdo = k_RInfo, recv = klass_RInfo;
2493     assert_different_registers(mdo, recv, Rtmp1);
2494     __ bind(profile_cast_success);
2495     metadata2reg(md->constant_encoding(), mdo);
2496     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2497     type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success);
2498     __ b(*success);
2499 
2500     // Cast failure case.
2501     __ bind(profile_cast_failure);
2502     metadata2reg(md->constant_encoding(), mdo);
2503     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2504     __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2505     __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
2506     __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2507   }
2508 
2509   __ bind(*failure);
2510 }
2511 
2512 
2513 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2514   LIR_Code code = op->code();
2515   if (code == lir_store_check) {
2516     Register value = op->object()->as_register();
2517     Register array = op->array()->as_register();
2518     Register k_RInfo = op->tmp1()->as_register();
2519     Register klass_RInfo = op->tmp2()->as_register();
2520     Register Rtmp1 = op->tmp3()->as_register();
2521     bool should_profile = op->should_profile();
2522 
2523     __ verify_oop(value, FILE_AND_LINE);
2524     CodeStub* stub = op->stub();
2525     // Check if it needs to be profiled.
2526     ciMethodData* md = NULL;
2527     ciProfileData* data = NULL;
2528     int mdo_offset_bias = 0;
2529     if (should_profile) {
2530       ciMethod* method = op->profiled_method();
2531       assert(method != NULL, "Should have method");
2532       setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2533     }
2534     Label profile_cast_success, failure, done;
2535     Label *success_target = should_profile ? &profile_cast_success : &done;
2536 
2537     __ cmpdi(CCR0, value, 0);
2538     if (should_profile) {
2539       Label not_null;
2540       __ bne(CCR0, not_null);
2541       Register mdo      = k_RInfo;
2542       Register data_val = Rtmp1;
2543       metadata2reg(md->constant_encoding(), mdo);
2544       __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2545       __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2546       __ ori(data_val, data_val, BitData::null_seen_byte_constant());
2547       __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2548       __ b(done);
2549       __ bind(not_null);
2550     } else {
2551       __ beq(CCR0, done);
2552     }
2553     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2554       explicit_null_check(array, op->info_for_exception());
2555     } else {
2556       add_debug_info_for_null_check_here(op->info_for_exception());
2557     }
2558     __ load_klass(k_RInfo, array);
2559     __ load_klass(klass_RInfo, value);
2560 
2561     // Get instance klass.
2562     __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo);
2563     // Perform the fast part of the checking logic.
2564     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, NULL);
2565 
2566     // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
2567     const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
2568     //__ load_const_optimized(R0, slow_path);
2569     __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
2570     __ mtctr(R0);
2571     __ bctrl(); // sets CR0
2572     if (!should_profile) {
2573       __ beq(CCR0, done);
2574       __ bind(failure);
2575     } else {
2576       __ bne(CCR0, failure);
2577       // Fall through to the success case.
2578 
2579       Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
2580       assert_different_registers(value, mdo, recv, tmp1);
2581       __ bind(profile_cast_success);
2582       metadata2reg(md->constant_encoding(), mdo);
2583       __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2584       __ load_klass(recv, value);
2585       type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
2586       __ b(done);
2587 
2588       // Cast failure case.
2589       __ bind(failure);
2590       metadata2reg(md->constant_encoding(), mdo);
2591       __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2593       __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2594       __ addi(tmp1, tmp1, -DataLayout::counter_increment);
2595       __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2596     }
2597     __ b(*stub->entry());
2598     __ bind(done);
2599 
2600   } else if (code == lir_checkcast) {
2601     Label success, failure;
2602     emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success);
2603     __ b(*op->stub()->entry());
2604     __ align(32, 12);
2605     __ bind(success);
2606     __ mr_if_needed(op->result_opr()->as_register(), op->object()->as_register());
2607   } else if (code == lir_instanceof) {
2608     Register dst = op->result_opr()->as_register();
2609     Label success, failure, done;
2610     emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure);
2611     __ li(dst, 0);
2612     __ b(done);
2613     __ align(32, 12);
2614     __ bind(success);
2615     __ li(dst, 1);
2616     __ bind(done);
2617   } else {
2618     ShouldNotReachHere();
2619   }
2620 }
2621 
2622 
2623 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2624   Register addr = op->addr()->as_pointer_register();
2625   Register cmp_value = noreg, new_value = noreg;
2626   bool is_64bit = false;
2627 
2628   if (op->code() == lir_cas_long) {
2629     cmp_value = op->cmp_value()->as_register_lo();
2630     new_value = op->new_value()->as_register_lo();
2631     is_64bit = true;
2632   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2633     cmp_value = op->cmp_value()->as_register();
2634     new_value = op->new_value()->as_register();
2635     if (op->code() == lir_cas_obj) {
2636       if (UseCompressedOops) {
2637         Register t1 = op->tmp1()->as_register();
2638         Register t2 = op->tmp2()->as_register();
2639         cmp_value = __ encode_heap_oop(t1, cmp_value);
2640         new_value = __ encode_heap_oop(t2, new_value);
2641       } else {
2642         is_64bit = true;
2643       }
2644     }
2645   } else {
2646     Unimplemented();
2647   }
2648 
2649   if (is_64bit) {
2650     __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
2651                 MacroAssembler::MemBarNone,
2652                 MacroAssembler::cmpxchgx_hint_atomic_update(),
2653                 noreg, NULL, /*check without ldarx first*/true);
2654   } else {
2655     __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
2656                 MacroAssembler::MemBarNone,
2657                 MacroAssembler::cmpxchgx_hint_atomic_update(),
2658                 noreg, /*check without ldarx first*/true);
2659   }
2660 
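       // Trailing barrier for the CAS result: isync suffices when IRIW is handled by a sync
       // before each volatile load (support_IRIW_for_not_multiple_copy_atomic_cpu);
       // otherwise a full sync is required.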
2661   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2662     __ isync();
2663   } else {
2664     __ sync();
2665   }
2666 }
2667 
2668 void LIR_Assembler::breakpoint() {
2669   __ illtrap();
2670 }
2671 
2672 
2673 void LIR_Assembler::push(LIR_Opr opr) {
2674   Unimplemented();
2675 }
2676 
2677 void LIR_Assembler::pop(LIR_Opr opr) {
2678   Unimplemented();
2679 }
2680 
2681 
2682 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2683   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2684   Register dst = dst_opr->as_register();
2685   Register reg = mon_addr.base();
2686   int offset = mon_addr.disp();
2687   // Compute pointer to BasicLock.
2688   __ add_const_optimized(dst, reg, offset);
2689 }
2690 
2691 
2692 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2693   Register obj = op->obj_opr()->as_register();
2694   Register hdr = op->hdr_opr()->as_register();
2695   Register lock = op->lock_opr()->as_register();
2696 
2697   // Obj may not be an oop.
2698   if (op->code() == lir_lock) {
2699     MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2700     if (UseFastLocking) {
2701       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2702       // Add debug info for NullPointerException only if one is possible.
2703       if (op->info() != NULL) {
2704         if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2705           explicit_null_check(obj, op->info());
2706         } else {
2707           add_debug_info_for_null_check_here(op->info());
2708         }
2709       }
2710       __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2711     } else {
2712       // Always do slow locking.
2713       // Note: The slow locking code could be inlined here, but if we use
2714       //       slow locking, speed doesn't matter anyway and this solution is
2715       //       simpler and requires less duplicated code; additionally, the
2716       //       slow locking code is the same in either case, which simplifies
2717       //       debugging.
2718       __ b(*op->stub()->entry());
2719     }
2720   } else {
2721     assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2722     if (UseFastLocking) {
2723       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2724       __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2725     } else {
2726       // Always do slow unlocking.
2727       // Note: The slow unlocking code could be inlined here, but if we use
2728       //       slow unlocking, speed doesn't matter anyway and this solution is
2729       //       simpler and requires less duplicated code; additionally, the
2730       //       slow unlocking code is the same in either case, which simplifies
2731       //       debugging.
2732       __ b(*op->stub()->entry());
2733     }
2734   }
2735   __ bind(*op->stub()->continuation());
2736 }
2737 
2738 
2739 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2740   ciMethod* method = op->profiled_method();
2741   int bci          = op->profiled_bci();
2742   ciMethod* callee = op->profiled_callee();
2743 
2744   // Update counter for all call types.
2745   ciMethodData* md = method->method_data_or_null();
2746   assert(md != NULL, "Sanity");
2747   ciProfileData* data = md->bci_to_data(bci);
2748   assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
2749   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2750   Register mdo = op->mdo()->as_register();
2751 #ifdef _LP64
2752   assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
2753   Register tmp1 = op->tmp1()->as_register_lo();
2754 #else
2755   assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
2756   Register tmp1 = op->tmp1()->as_register();
2757 #endif
2758   metadata2reg(md->constant_encoding(), mdo);
2759   int mdo_offset_bias = 0;
2760   if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) +
2761                             data->size_in_bytes())) {
2762     // The offset is large so bias the mdo by the base of the slot so
2763     // that the ld can use simm16s to reference the slots of the data.
2764     mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
2765     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2766   }
2767 
2768   // Perform additional virtual call profiling for invokevirtual and
2769   // invokeinterface bytecodes.
2770   if (op->should_profile_receiver_type()) {
2771     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2772     Register recv = op->recv()->as_register();
2773     assert_different_registers(mdo, tmp1, recv);
2774     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2775     ciKlass* known_klass = op->known_holder();
2776     if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2777       // We know the type that will be seen at this call site; we can
2778       // statically update the MethodData* rather than needing to do
2779       // dynamic tests on the receiver type.
2780 
2781       // NOTE: we should probably put a lock around this search to
2782       // avoid collisions by concurrent compilations.
2783       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2784       uint i;
2785       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2786         ciKlass* receiver = vc_data->receiver(i);
2787         if (known_klass->equals(receiver)) {
2788           __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2789           __ addi(tmp1, tmp1, DataLayout::counter_increment);
2790           __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2791           return;
2792         }
2793       }
2794 
2795       // Receiver type not found in profile data; select an empty slot.
2796 
      // Note that this is less efficient than it could be: the emitted code
      // writes the receiver slot of the VirtualCallData on every execution
      // rather than only the first time.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo);

          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(32, 12); // Insert up to 3 nops to align to a 32-byte boundary.
}


void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  Unimplemented();
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(dest->as_register(), left->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(dest->as_double_reg(), left->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  }
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  // Stubs: Called via rt_call, but dest is a stub address (no function descriptor).
  if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) ||
      dest == Runtime1::entry_for(Runtime1::new_multi_array_id   )) {
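    // The stub entry point is reachable from the global TOC, so materialize
    // it with a single add from R29_TOC and call indirectly through CTR.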
    //__ load_const_optimized(R0, dest);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
    __ mtctr(R0);
    __ bctrl();
    assert(info != NULL, "sanity");
    add_call_info_here(info);
    return;
  }

  __ call_c_with_frame_resize(dest, /*no resizing*/ 0);
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotReachHere(); // Not needed on _LP64.
}

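// Memory barriers: each of the following maps a C1 barrier kind onto the
// corresponding barrier provided by the PPC macro assembler.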
void LIR_Assembler::membar() {
  __ fence();
}

void LIR_Assembler::membar_acquire() {
  __ acquire();
}

void LIR_Assembler::membar_release() {
  __ release();
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(Assembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() {
  __ membar(Assembler::LoadStore);
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::StoreLoad);
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform");

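  // Compute the effective address into dest: either base + displacement
  // (possibly patched later) or base + index, never both.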
  if (addr->index()->is_illegal()) {
    if (patch_code != lir_patch_none) {
      PatchingStub* patch = new PatchingStub(_masm, PatchingStub::access_field_id);
      __ load_const32(R0, 0); // patchable int
      __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), R0);
      patching_epilog(patch, patch_code, addr->base()->as_register(), info);
    } else {
      __ add_const_optimized(dest->as_pointer_register(), addr->base()->as_pointer_register(), addr->disp());
    }
  } else {
    assert(patch_code == lir_patch_none, "Patch code not supported");
    assert(addr->disp() == 0, "can't have both: index and disp");
    __ add(dest->as_pointer_register(), addr->index()->as_pointer_register(), addr->base()->as_pointer_register());
  }
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  ShouldNotReachHere();
}


#ifdef ASSERT
// Emit run-time assertion.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  Unimplemented();
}
#endif


void LIR_Assembler::peephole(LIR_List* lir) {
  // Optimize instruction pairs before emitting.
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 1; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);

    // Remove the second of two mutually inverse register-register moves.
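    // For example (with hypothetical virtual registers vA and vB):
    //   move vA -> vB   (prev)
    //   move vB -> vA   (op)    <- redundant; removed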
    if (op->code() == lir_move) {
      LIR_Opr in2  = ((LIR_Op1*)op)->in_opr(),
              res2 = ((LIR_Op1*)op)->result_opr();
      if (in2->is_register() && res2->is_register()) {
        LIR_Op* prev = inst->at(i - 1);
        if (prev && prev->code() == lir_move) {
          LIR_Opr in1  = ((LIR_Op1*)prev)->in_opr(),
                  res1 = ((LIR_Op1*)prev)->result_opr();
          if (in1->is_same_register(res2) && in2->is_same_register(res1)) {
            inst->remove_at(i);
          }
        }
      }
    }

  }
}


void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  const LIR_Address *addr = src->as_address_ptr();
  assert(addr->disp() == 0 && addr->index()->is_illegal(), "use leal!");
  const Register Rptr = addr->base()->as_pointer_register(),
                 Rtmp = tmp->as_register();
  Register Rco = noreg;
  if (UseCompressedOops && data->is_oop()) {
    Rco = __ encode_heap_oop(Rtmp, data->as_register());
  }

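  // Load-reserve/store-conditional retry loop: lwarx/ldarx reserves the
  // location, and the matching stwcx_/stdcx_ succeeds only while the
  // reservation still holds; on failure we branch back to Lretry.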
  Label Lretry;
  __ bind(Lretry);

  if (data->type() == T_INT) {
    const Register Rold = dest->as_register(),
                   Rsrc = data->as_register();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stwcx_(Rtmp, Rptr);
    } else {
      __ stwcx_(Rsrc, Rptr);
    }
  } else if (data->is_oop()) {
    assert(code == lir_xchg, "only xchg is supported for oops");
    const Register Rold = dest->as_register();
    if (UseCompressedOops) {
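      // Narrow oops are 32 bits wide, so the exchange uses the word-sized
      // lwarx/stwcx_ pair on the pre-encoded compressed value in Rco.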
      assert_different_registers(Rptr, Rold, Rco);
      __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stwcx_(Rco, Rptr);
    } else {
      const Register Robj = data->as_register();
      assert_different_registers(Rptr, Rold, Robj);
      __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stdcx_(Robj, Rptr);
    }
  } else if (data->type() == T_LONG) {
    const Register Rold = dest->as_register_lo(),
                   Rsrc = data->as_register_lo();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stdcx_(Rtmp, Rptr);
    } else {
      __ stdcx_(Rsrc, Rptr);
    }
  } else {
    ShouldNotReachHere();
  }

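  // The store-conditional clears CCR0.eq if the reservation was lost;
  // branch back and retry in that case.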
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    __ bne_predict_not_taken(CCR0, Lretry);
  } else {
    __ bne(                  CCR0, Lretry);
  }

  if (UseCompressedOops && data->is_oop()) {
    __ decode_heap_oop(dest->as_register());
  }
}


void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label Lupdate, Ldo_update, Ldone;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
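  // do_null:   obj may be null and the null_seen flag may not be set yet.
  // do_update: the recorded type is neither already unknown nor already
  //            equal to the exact klass, so the entry may need updating.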

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj, FILE_AND_LINE);

  if (do_null) {
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ cmpdi(CCR0, obj, 0);
      __ bne(CCR0, Lupdate);
      __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      __ ori(R0, R0, TypeEntries::null_seen);
      if (do_update) {
        __ b(Ldo_update);
      } else {
        __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      }
    } else {
      if (do_update) {
        __ cmpdi(CCR0, obj, 0);
        __ beq(CCR0, Ldone);
      }
    }
#ifdef ASSERT
  } else {
    __ cmpdi(CCR0, obj, 0);
    __ bne(CCR0, Lupdate);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(Lupdate);
  if (do_update) {
    Label Lnext;
    const Register klass = R29_TOC; // kill and reload
    bool klass_reg_used = false;
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      klass_reg_used = true;
      __ load_klass(klass, obj);
      metadata2reg(exact_klass->constant_encoding(), R0);
      __ cmpd(CCR0, klass, R0);
      __ beq(CCR0, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        if (exact_klass != NULL) {
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
          metadata2reg(exact_klass->constant_encoding(), klass);
        } else {
          __ load_klass(klass, obj);
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); // may kill obj
        }

        // Like InterpreterMacroAssembler::profile_obj_type
        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        //beq(CCR1, do_nothing);

        __ andi_(R0, tmp, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        //bne(CCR0, do_nothing);
        __ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
        __ beq(CCR0, Lnext);

        if (TypeEntries::is_type_none(current_klass)) {
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
          __ beq(CCR0, Ldo_update); // First time here. Set profile type.
        }

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        __ bne(CCR0, Lnext);
      }

      // Different from before. Cannot keep an accurate profile.
      __ ori(R0, tmp, TypeEntries::type_unknown);
    } else {
      // There's a single possible klass at this profile point.
      assert(exact_klass != NULL, "should be");
      __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

      if (TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        metadata2reg(exact_klass->constant_encoding(), klass);

        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        __ beq(CCR1, Lnext);
#ifdef ASSERT
        {
          Label ok;
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ beq(CCR0, ok); // First time here.

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
#endif
        // First time here. Set profile type.
        __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Already unknown. Nothing to do anymore.
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        __ bne(CCR0, Lnext);

        // Different from before. Cannot keep an accurate profile.
        __ ori(R0, tmp, TypeEntries::type_unknown);
      }
    }

    __ bind(Ldo_update);
    __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

    __ bind(Lnext);
    if (klass_reg_used) { __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); } // reinit
  }
  __ bind(Ldone);
}


void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

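  // res temporarily holds the address of the CRC lookup table used by the
  // stub; the updated CRC is computed into crc and then copied into res.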
  __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
  __ kernel_crc32_singleByteReg(crc, val, res, true);
  __ mr(res, crc);
}

#undef __