1 /*
   2  * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_LIRAssembler.hpp"
  29 #include "c1/c1_MacroAssembler.hpp"
  30 #include "c1/c1_Runtime1.hpp"
  31 #include "c1/c1_ValueStack.hpp"
  32 #include "ci/ciArrayKlass.hpp"
  33 #include "ci/ciInstance.hpp"
  34 #include "gc/shared/collectedHeap.hpp"
  35 #include "memory/universe.hpp"
  36 #include "nativeInst_s390.hpp"
  37 #include "oops/objArrayKlass.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/safepointMechanism.inline.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "utilities/macros.hpp"
  43 #include "utilities/powerOfTwo.hpp"
  44 #include "vmreg_s390.inline.hpp"
  45 
  46 #define __ _masm->
  47 
  48 #ifndef PRODUCT
  49 #undef __
  50 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm) : _masm)->
  51 #endif
  52 
  53 //------------------------------------------------------------
  54 
  55 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  56   // Not used on ZARCH_64
  57   ShouldNotCallThis();
  58   return false;
  59 }
  60 
  61 LIR_Opr LIR_Assembler::receiverOpr() {
  62   return FrameMap::Z_R2_oop_opr;
  63 }
  64 
  65 LIR_Opr LIR_Assembler::osrBufferPointer() {
  66   return FrameMap::Z_R2_opr;
  67 }
  68 
  69 int LIR_Assembler::initial_frame_size_in_bytes() const {
  70   return in_bytes(frame_map()->framesize_in_bytes());
  71 }
  72 
  73 // Inline cache check: done before the frame is built.
  74 // The inline cached class is in Z_inline_cache(Z_R9).
  75 // We fetch the class of the receiver and compare it with the cached class.
  76 // If they do not match we jump to the slow case.
  77 int LIR_Assembler::check_icache() {
  78   return __ ic_check(CodeEntryAlignment);
  79 }
  80 
  81 void LIR_Assembler::clinit_barrier(ciMethod* method) {
  82   assert(!method->holder()->is_not_initialized(), "initialization should have been started");
  83 
  84   Label L_skip_barrier;
  85   Register klass = Z_R1_scratch;
  86 
  87   metadata2reg(method->holder()->constant_encoding(), klass);
  88   __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
  89 
  90   __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
  91   __ z_br(klass);
  92 
  93   __ bind(L_skip_barrier);
  94 }
  95 
  96 void LIR_Assembler::osr_entry() {
  97   // On-stack-replacement entry sequence (interpreter frame layout described in frame_s390.hpp):
  98   //
  99   //   1. Create a new compiled activation.
 100   //   2. Initialize local variables in the compiled activation. The expression stack must be empty
 101   //      at the osr_bci; it is not initialized.
 102   //   3. Jump to the continuation address in compiled code to resume execution.
 103 
 104   // OSR entry point
 105   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 106   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 107   ValueStack* entry_state = osr_entry->end()->state();
 108   int number_of_locks = entry_state->locks_size();
 109 
 110   // Create a frame for the compiled activation.
 111   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
 112 
 113   // OSR buffer is
 114   //
 115   // locals[nlocals-1..0]
 116   // monitors[number_of_locks-1..0]
 117   //
 118   // Locals is a direct copy of the interpreter frame so in the osr buffer
 119   // the first slot in the local array is the last local from the interpreter
 120   // and the last slot is local[0] (receiver) from the interpreter
 121   //
 122   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 123   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 124   // in the interpreter frame (the method lock if a sync method)
 125 
 126   // Initialize monitors in the compiled activation.
 127   //   I0: pointer to osr buffer
 128   //
 129   // All other registers are dead at this point and the locals will be
 130   // copied into place by code emitted in the IR.
 131 
 132   Register OSR_buf = osrBufferPointer()->as_register();
 133   {
 134     assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 135 
 136     const int locals_space = BytesPerWord * method() -> max_locals();
 137     int monitor_offset = locals_space + (2 * BytesPerWord) * (number_of_locks - 1);
 138     bool large_offset = !Immediate::is_simm20(monitor_offset + BytesPerWord) && number_of_locks > 0;
 139 
 140     if (large_offset) {
 141       // z_lg can only handle displacement upto 20bit signed binary integer
 142       __ z_algfi(OSR_buf, locals_space);
 143       monitor_offset -= locals_space;
 144     }
 145 
 146     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 147     // the OSR buffer using 2 word entries: first the lock and then
 148     // the oop.
 149     for (int i = 0; i < number_of_locks; i++) {
 150       int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 151       // Verify the interpreter's monitor has a non-null object.
 152       __ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is null", __LINE__);
 153       // Copy the lock field into the compiled activation.
 154       __ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf);
 155       __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i));
 156       __ z_lg(Z_R1_scratch, slot_offset + 1*BytesPerWord, OSR_buf);
 157       __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_object(i));
 158     }
 159 
 160     if (large_offset) {
 161       __ z_slgfi(OSR_buf, locals_space);
 162     }
 163   }
 164 }
 165 
 166 // --------------------------------------------------------------------------------------------
 167 
 168 address LIR_Assembler::emit_call_c(address a) {
 169   __ align_call_far_patchable(__ pc());
 170   address call_addr = __ call_c_opt(a);
 171   if (call_addr == nullptr) {
 172     bailout("const section overflow");
 173   }
 174   return call_addr;
 175 }
 176 
 177 int LIR_Assembler::emit_exception_handler() {
 178   // Generate code for exception handler.
 179   address handler_base = __ start_a_stub(exception_handler_size());
 180   if (handler_base == nullptr) {
 181     // Not enough space left for the handler.
 182     bailout("exception handler overflow");
 183     return -1;
 184   }
 185 
 186   int offset = code_offset();
 187 
 188   address a = Runtime1::entry_for (StubId::c1_handle_exception_from_callee_id);
 189   address call_addr = emit_call_c(a);
 190   CHECK_BAILOUT_(-1);
 191   __ should_not_reach_here();
 192   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 193   __ end_a_stub();
 194 
 195   return offset;
 196 }
 197 
 198 // Emit the code to remove the frame from the stack in the exception
 199 // unwind path.
 200 int LIR_Assembler::emit_unwind_handler() {
 201 #ifndef PRODUCT
 202   if (CommentedAssembly) {
 203     _masm->block_comment("Unwind handler");
 204   }
 205 #endif
 206 
 207   int offset = code_offset();
 208   Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved.
 209   Register Rtmp1                      = Z_R11;
 210   Register Rtmp2                      = Z_R12;
 211 
 212   // Fetch the exception from TLS and clear out exception related thread state.
 213   Address exc_oop_addr = Address(Z_thread, JavaThread::exception_oop_offset());
 214   Address exc_pc_addr  = Address(Z_thread, JavaThread::exception_pc_offset());
 215   __ z_lg(Z_EXC_OOP, exc_oop_addr);
 216   __ clear_mem(exc_oop_addr, sizeof(oop));
 217   __ clear_mem(exc_pc_addr, sizeof(intptr_t));
 218 
 219   __ bind(_unwind_handler_entry);
 220   __ verify_not_null_oop(Z_EXC_OOP);
 221   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 222     __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP); // Preserve the exception.
 223   }
 224 
 225   // Perform needed unlocking.
 226   MonitorExitStub* stub = nullptr;
 227   if (method()->is_synchronized()) {
 228     // StubId::c1_monitorexit_id expects lock address in Z_R1_scratch.
 229     LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch);
 230     monitor_address(0, lock);
 231     stub = new MonitorExitStub(lock, 0);
 232     __ unlock_object(Rtmp1, Rtmp2, lock->as_register(), *stub->entry());
 233     __ bind(*stub->continuation());
 234   }
 235 
 236   if (compilation()->env()->dtrace_method_probes()) {
 237     ShouldNotReachHere(); // Not supported.
 238 #if 0
 239     __ mov(rdi, r15_thread);
 240     __ mov_metadata(rsi, method()->constant_encoding());
 241     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 242 #endif
 243   }
 244 
 245   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 246     __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved);  // Restore the exception.
 247   }
 248 
 249   // Remove the activation and dispatch to the unwind handler.
 250   __ pop_frame();
 251   __ z_lg(Z_EXC_PC, _z_common_abi(return_pc), Z_SP);
 252 
 253   // Z_EXC_OOP: exception oop
 254   // Z_EXC_PC: exception pc
 255 
 256   // Dispatch to the unwind logic.
 257   __ load_const_optimized(Z_R5, Runtime1::entry_for (StubId::c1_unwind_exception_id));
 258   __ z_br(Z_R5);
 259 
 260   // Emit the slow path assembly.
 261   if (stub != nullptr) {
 262     stub->emit_code(this);
 263   }
 264 
 265   return offset;
 266 }
 267 
 268 int LIR_Assembler::emit_deopt_handler() {
 269   // Generate code for exception handler.
 270   address handler_base = __ start_a_stub(deopt_handler_size());
 271   if (handler_base == nullptr) {
 272     // Not enough space left for the handler.
 273     bailout("deopt handler overflow");
 274     return -1;
 275   }
 276 
 277   int offset = code_offset();
 278 
 279   Label start;
 280   __ bind(start);
 281 
 282   // Size must be constant (see HandlerImpl::emit_deopt_handler).
 283   __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
 284   __ call(Z_R1_scratch);
 285 
 286   int entry_offset = __ offset();
 287 
 288   __ z_bru(start);
 289 
 290   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 291   assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
 292          "out of bounds read in post-call NOP check");
 293   __ end_a_stub();
 294 
 295   return entry_offset;
 296 }
 297 
 298 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 299   if (o == nullptr) {
 300     __ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove.
 301   } else {
 302     AddressLiteral a = __ allocate_oop_address(o);
 303     bool success = __ load_oop_from_toc(reg, a, reg);
 304     if (!success) {
 305       bailout("const section overflow");
 306     }
 307   }
 308 }
 309 
 310 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 311   // Allocate a new index in table to hold the object once it's been patched.
 312   int oop_index = __ oop_recorder()->allocate_oop_index(nullptr);
 313   PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
 314 
 315   AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index));
 316   assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
 317   // The null will be dynamically patched later so the sequence to
 318   // load the address literal must not be optimized.
 319   __ load_const(reg, addrlit);
 320 
 321   patching_epilog(patch, lir_patch_normal, reg, info);
 322 }
 323 
 324 void LIR_Assembler::metadata2reg(Metadata* md, Register reg) {
 325   bool success = __ set_metadata_constant(md, reg);
 326   if (!success) {
 327     bailout("const section overflow");
 328     return;
 329   }
 330 }
 331 
 332 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
 333   // Allocate a new index in table to hold the klass once it's been patched.
 334   int index = __ oop_recorder()->allocate_metadata_index(nullptr);
 335   PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
 336   AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index));
 337   assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be an metadata reloc");
 338   // The null will be dynamically patched later so the sequence to
 339   // load the address literal must not be optimized.
 340   __ load_const(reg, addrlit);
 341 
 342   patching_epilog(patch, lir_patch_normal, reg, info);
 343 }
 344 
 345 void LIR_Assembler::emit_op3(LIR_Op3* op) {
 346   switch (op->code()) {
 347     case lir_idiv:
 348     case lir_irem:
 349       arithmetic_idiv(op->code(),
 350                       op->in_opr1(),
 351                       op->in_opr2(),
 352                       op->in_opr3(),
 353                       op->result_opr(),
 354                       op->info());
 355       break;
 356     case lir_fmad: {
 357       const FloatRegister opr1 = op->in_opr1()->as_double_reg(),
 358                           opr2 = op->in_opr2()->as_double_reg(),
 359                           opr3 = op->in_opr3()->as_double_reg(),
 360                           res  = op->result_opr()->as_double_reg();
 361       __ z_madbr(opr3, opr1, opr2);
 362       if (res != opr3) { __ z_ldr(res, opr3); }
 363     } break;
 364     case lir_fmaf: {
 365       const FloatRegister opr1 = op->in_opr1()->as_float_reg(),
 366                           opr2 = op->in_opr2()->as_float_reg(),
 367                           opr3 = op->in_opr3()->as_float_reg(),
 368                           res  = op->result_opr()->as_float_reg();
 369       __ z_maebr(opr3, opr1, opr2);
 370       if (res != opr3) { __ z_ler(res, opr3); }
 371     } break;
 372     default: ShouldNotReachHere(); break;
 373   }
 374 }
 375 
 376 
 377 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
 378 #ifdef ASSERT
 379   assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
 380   if (op->block() != nullptr)  { _branch_target_blocks.append(op->block()); }
 381   if (op->ublock() != nullptr) { _branch_target_blocks.append(op->ublock()); }
 382 #endif
 383 
 384   if (op->cond() == lir_cond_always) {
 385     if (op->info() != nullptr) { add_debug_info_for_branch(op->info()); }
 386     __ branch_optimized(Assembler::bcondAlways, *(op->label()));
 387   } else {
 388     Assembler::branch_condition acond = Assembler::bcondZero;
 389     if (op->code() == lir_cond_float_branch) {
 390       assert(op->ublock() != nullptr, "must have unordered successor");
 391       __ branch_optimized(Assembler::bcondNotOrdered, *(op->ublock()->label()));
 392     }
 393     switch (op->cond()) {
 394       case lir_cond_equal:        acond = Assembler::bcondEqual;     break;
 395       case lir_cond_notEqual:     acond = Assembler::bcondNotEqual;  break;
 396       case lir_cond_less:         acond = Assembler::bcondLow;       break;
 397       case lir_cond_lessEqual:    acond = Assembler::bcondNotHigh;   break;
 398       case lir_cond_greaterEqual: acond = Assembler::bcondNotLow;    break;
 399       case lir_cond_greater:      acond = Assembler::bcondHigh;      break;
 400       case lir_cond_belowEqual:   acond = Assembler::bcondNotHigh;   break;
 401       case lir_cond_aboveEqual:   acond = Assembler::bcondNotLow;    break;
 402       default:                         ShouldNotReachHere();
 403     }
 404     __ branch_optimized(acond,*(op->label()));
 405   }
 406 }
 407 
 408 
 409 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
 410   LIR_Opr src  = op->in_opr();
 411   LIR_Opr dest = op->result_opr();
 412 
 413   switch (op->bytecode()) {
 414     case Bytecodes::_i2l:
 415       __ move_reg_if_needed(dest->as_register_lo(), T_LONG, src->as_register(), T_INT);
 416       break;
 417 
 418     case Bytecodes::_l2i:
 419       __ move_reg_if_needed(dest->as_register(), T_INT, src->as_register_lo(), T_LONG);
 420       break;
 421 
 422     case Bytecodes::_i2b:
 423       __ move_reg_if_needed(dest->as_register(), T_BYTE, src->as_register(), T_INT);
 424       break;
 425 
 426     case Bytecodes::_i2c:
 427       __ move_reg_if_needed(dest->as_register(), T_CHAR, src->as_register(), T_INT);
 428       break;
 429 
 430     case Bytecodes::_i2s:
 431       __ move_reg_if_needed(dest->as_register(), T_SHORT, src->as_register(), T_INT);
 432       break;
 433 
 434     case Bytecodes::_f2d:
 435       assert(dest->is_double_fpu(), "check");
 436       __ move_freg_if_needed(dest->as_double_reg(), T_DOUBLE, src->as_float_reg(), T_FLOAT);
 437       break;
 438 
 439     case Bytecodes::_d2f:
 440       assert(dest->is_single_fpu(), "check");
 441       __ move_freg_if_needed(dest->as_float_reg(), T_FLOAT, src->as_double_reg(), T_DOUBLE);
 442       break;
 443 
 444     case Bytecodes::_i2f:
 445       __ z_cefbr(dest->as_float_reg(), src->as_register());
 446       break;
 447 
 448     case Bytecodes::_i2d:
 449       __ z_cdfbr(dest->as_double_reg(), src->as_register());
 450       break;
 451 
 452     case Bytecodes::_l2f:
 453       __ z_cegbr(dest->as_float_reg(), src->as_register_lo());
 454       break;
 455     case Bytecodes::_l2d:
 456       __ z_cdgbr(dest->as_double_reg(), src->as_register_lo());
 457       break;
 458 
 459     case Bytecodes::_f2i:
 460     case Bytecodes::_f2l: {
 461       Label done;
 462       FloatRegister Rsrc = src->as_float_reg();
 463       Register Rdst = (op->bytecode() == Bytecodes::_f2i ? dest->as_register() : dest->as_register_lo());
 464       __ clear_reg(Rdst, true, false);
 465       __ z_cebr(Rsrc, Rsrc);
 466       __ z_brno(done); // NaN -> 0
 467       if (op->bytecode() == Bytecodes::_f2i) {
 468         __ z_cfebr(Rdst, Rsrc, Assembler::to_zero);
 469       } else { // op->bytecode() == Bytecodes::_f2l
 470         __ z_cgebr(Rdst, Rsrc, Assembler::to_zero);
 471       }
 472       __ bind(done);
 473     }
 474     break;
 475 
 476     case Bytecodes::_d2i:
 477     case Bytecodes::_d2l: {
 478       Label done;
 479       FloatRegister Rsrc = src->as_double_reg();
 480       Register Rdst = (op->bytecode() == Bytecodes::_d2i ? dest->as_register() : dest->as_register_lo());
 481       __ clear_reg(Rdst, true, false);  // Don't set CC.
 482       __ z_cdbr(Rsrc, Rsrc);
 483       __ z_brno(done); // NaN -> 0
 484       if (op->bytecode() == Bytecodes::_d2i) {
 485         __ z_cfdbr(Rdst, Rsrc, Assembler::to_zero);
 486       } else { // Bytecodes::_d2l
 487         __ z_cgdbr(Rdst, Rsrc, Assembler::to_zero);
 488       }
 489       __ bind(done);
 490     }
 491     break;
 492 
 493     default: ShouldNotReachHere();
 494   }
 495 }
 496 
 497 void LIR_Assembler::align_call(LIR_Code code) {
 498   // End of call instruction must be 4 byte aligned.
 499   int offset = __ offset();
 500   switch (code) {
 501     case lir_icvirtual_call:
 502       offset += MacroAssembler::load_const_from_toc_size();
 503       // no break
 504     case lir_static_call:
 505     case lir_optvirtual_call:
 506     case lir_dynamic_call:
 507       offset += NativeCall::call_far_pcrelative_displacement_offset;
 508       break;
 509     default: ShouldNotReachHere();
 510   }
 511   if ((offset & (NativeCall::call_far_pcrelative_displacement_alignment-1)) != 0) {
 512     __ nop();
 513   }
 514 }
 515 
 516 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
 517   assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
 518          "must be aligned (offset=%d)", __ offset());
 519   assert(rtype == relocInfo::none ||
 520          rtype == relocInfo::opt_virtual_call_type ||
 521          rtype == relocInfo::static_call_type, "unexpected rtype");
 522   // Prepend each BRASL with a nop.
 523   __ relocate(rtype);
 524   __ z_nop();
 525   __ z_brasl(Z_R14, op->addr());
 526   add_call_info(code_offset(), op->info());
 527 }
 528 
 529 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
 530   address virtual_call_oop_addr = nullptr;
 531   AddressLiteral empty_ic((address) Universe::non_oop_word());
 532   virtual_call_oop_addr = __ pc();
 533   bool success = __ load_const_from_toc(Z_inline_cache, empty_ic);
 534   if (!success) {
 535     bailout("const section overflow");
 536     return;
 537   }
 538 
 539   // CALL to fixup routine. Fixup routine uses ScopeDesc info
 540   // to determine who we intended to call.
 541   __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));
 542   call(op, relocInfo::none);
 543 }
 544 
 545 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 546   if (from_reg != to_reg) __ z_lgr(to_reg, from_reg);
 547 }
 548 
 549 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 550   assert(src->is_constant(), "should not call otherwise");
 551   assert(dest->is_stack(), "should not call otherwise");
 552   LIR_Const* c = src->as_constant_ptr();
 553 
 554   unsigned int lmem = 0;
 555   unsigned int lcon = 0;
 556   int64_t cbits = 0;
 557   Address dest_addr;
 558   switch (c->type()) {
 559     case T_INT:  // fall through
 560     case T_FLOAT:
 561       dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 562       lmem = 4; lcon = 4; cbits = c->as_jint_bits();
 563       break;
 564 
 565     case T_ADDRESS:
 566       dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 567       lmem = 8; lcon = 4; cbits = c->as_jint_bits();
 568       break;
 569 
 570     case T_OBJECT:
 571       dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 572       if (c->as_jobject() == nullptr) {
 573         __ store_const(dest_addr, (int64_t)NULL_WORD, 8, 8);
 574       } else {
 575         jobject2reg(c->as_jobject(), Z_R1_scratch);
 576         __ reg2mem_opt(Z_R1_scratch, dest_addr, true);
 577       }
 578       return;
 579 
 580     case T_LONG:  // fall through
 581     case T_DOUBLE:
 582       dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 583       lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
 584       break;
 585 
 586     default:
 587       ShouldNotReachHere();
 588   }
 589 
 590   __ store_const(dest_addr, cbits, lmem, lcon);
 591 }
 592 
 593 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 594   assert(src->is_constant(), "should not call otherwise");
 595   assert(dest->is_address(), "should not call otherwise");
 596 
 597   LIR_Const* c = src->as_constant_ptr();
 598   Address addr = as_Address(dest->as_address_ptr());
 599 
 600   int store_offset = -1;
 601 
 602   if (dest->as_address_ptr()->index()->is_valid()) {
 603     switch (type) {
 604       case T_INT:    // fall through
 605       case T_FLOAT:
 606         __ load_const_optimized(Z_R0_scratch, c->as_jint_bits());
 607         store_offset = __ offset();
 608         if (Immediate::is_uimm12(addr.disp())) {
 609           __ z_st(Z_R0_scratch, addr);
 610         } else {
 611           __ z_sty(Z_R0_scratch, addr);
 612         }
 613         break;
 614 
 615       case T_ADDRESS:
 616         __ load_const_optimized(Z_R1_scratch, c->as_jint_bits());
 617         store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
 618         break;
 619 
 620       case T_OBJECT:  // fall through
 621       case T_ARRAY:
 622         if (c->as_jobject() == nullptr) {
 623           if (UseCompressedOops && !wide) {
 624             __ clear_reg(Z_R1_scratch, false);
 625             store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
 626           } else {
 627             __ clear_reg(Z_R1_scratch, true);
 628             store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
 629           }
 630         } else {
 631           jobject2reg(c->as_jobject(), Z_R1_scratch);
 632           if (UseCompressedOops && !wide) {
 633             __ encode_heap_oop(Z_R1_scratch);
 634             store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
 635           } else {
 636             store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
 637           }
 638         }
 639         assert(store_offset >= 0, "check");
 640         break;
 641 
 642       case T_LONG:    // fall through
 643       case T_DOUBLE:
 644         __ load_const_optimized(Z_R1_scratch, (int64_t)(c->as_jlong_bits()));
 645         store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
 646         break;
 647 
 648       case T_BOOLEAN: // fall through
 649       case T_BYTE:
 650         __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
 651         store_offset = __ offset();
 652         if (Immediate::is_uimm12(addr.disp())) {
 653           __ z_stc(Z_R0_scratch, addr);
 654         } else {
 655           __ z_stcy(Z_R0_scratch, addr);
 656         }
 657         break;
 658 
 659       case T_CHAR:    // fall through
 660       case T_SHORT:
 661         __ load_const_optimized(Z_R0_scratch, (int16_t)(c->as_jint()));
 662         store_offset = __ offset();
 663         if (Immediate::is_uimm12(addr.disp())) {
 664           __ z_sth(Z_R0_scratch, addr);
 665         } else {
 666           __ z_sthy(Z_R0_scratch, addr);
 667         }
 668         break;
 669 
 670       default:
 671         ShouldNotReachHere();
 672     }
 673 
 674   } else { // no index
 675 
 676     unsigned int lmem = 0;
 677     unsigned int lcon = 0;
 678     int64_t cbits = 0;
 679 
 680     switch (type) {
 681       case T_INT:    // fall through
 682       case T_FLOAT:
 683         lmem = 4; lcon = 4; cbits = c->as_jint_bits();
 684         break;
 685 
 686       case T_ADDRESS:
 687         lmem = 8; lcon = 4; cbits = c->as_jint_bits();
 688         break;
 689 
 690       case T_OBJECT:  // fall through
 691       case T_ARRAY:
 692         if (c->as_jobject() == nullptr) {
 693           if (UseCompressedOops && !wide) {
 694             store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4);
 695           } else {
 696             store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8);
 697           }
 698         } else {
 699           jobject2reg(c->as_jobject(), Z_R1_scratch);
 700           if (UseCompressedOops && !wide) {
 701             __ encode_heap_oop(Z_R1_scratch);
 702             store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
 703           } else {
 704             store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
 705           }
 706         }
 707         assert(store_offset >= 0, "check");
 708         break;
 709 
 710       case T_LONG:    // fall through
 711       case T_DOUBLE:
 712         lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
 713         break;
 714 
 715       case T_BOOLEAN: // fall through
 716       case T_BYTE:
 717         lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint());
 718         break;
 719 
 720       case T_CHAR:    // fall through
 721       case T_SHORT:
 722         lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint());
 723         break;
 724 
 725       default:
 726         ShouldNotReachHere();
 727     }
 728 
 729     if (store_offset == -1) {
 730       store_offset = __ store_const(addr, cbits, lmem, lcon);
 731       assert(store_offset >= 0, "check");
 732     }
 733   }
 734 
 735   if (info != nullptr) {
 736     add_debug_info_for_null_check(store_offset, info);
 737   }
 738 }
 739 
 740 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
 741   assert(src->is_constant(), "should not call otherwise");
 742   assert(dest->is_register(), "should not call otherwise");
 743   LIR_Const* c = src->as_constant_ptr();
 744 
 745   switch (c->type()) {
 746     case T_INT: {
 747       assert(patch_code == lir_patch_none, "no patching handled here");
 748       __ load_const_optimized(dest->as_register(), c->as_jint());
 749       break;
 750     }
 751 
 752     case T_ADDRESS: {
 753       assert(patch_code == lir_patch_none, "no patching handled here");
 754       __ load_const_optimized(dest->as_register(), c->as_jint());
 755       break;
 756     }
 757 
 758     case T_LONG: {
 759       assert(patch_code == lir_patch_none, "no patching handled here");
 760       __ load_const_optimized(dest->as_register_lo(), (intptr_t)c->as_jlong());
 761       break;
 762     }
 763 
 764     case T_OBJECT: {
 765       if (patch_code != lir_patch_none) {
 766         jobject2reg_with_patching(dest->as_register(), info);
 767       } else {
 768         jobject2reg(c->as_jobject(), dest->as_register());
 769       }
 770       break;
 771     }
 772 
 773     case T_METADATA: {
 774       if (patch_code != lir_patch_none) {
 775         klass2reg_with_patching(dest->as_register(), info);
 776       } else {
 777         metadata2reg(c->as_metadata(), dest->as_register());
 778       }
 779       break;
 780     }
 781 
 782     case T_FLOAT: {
 783       Register toc_reg = Z_R1_scratch;
 784       __ load_toc(toc_reg);
 785       address const_addr = __ float_constant(c->as_jfloat());
 786       if (const_addr == nullptr) {
 787         bailout("const section overflow");
 788         break;
 789       }
 790       int displ = const_addr - _masm->code()->consts()->start();
 791       if (dest->is_single_fpu()) {
 792         __ z_ley(dest->as_float_reg(), displ, toc_reg);
 793       } else {
 794         assert(dest->is_single_cpu(), "Must be a cpu register.");
 795         __ z_ly(dest->as_register(), displ, toc_reg);
 796       }
 797     }
 798     break;
 799 
 800     case T_DOUBLE: {
 801       Register toc_reg = Z_R1_scratch;
 802       __ load_toc(toc_reg);
 803       address const_addr = __ double_constant(c->as_jdouble());
 804       if (const_addr == nullptr) {
 805         bailout("const section overflow");
 806         break;
 807       }
 808       int displ = const_addr - _masm->code()->consts()->start();
 809       if (dest->is_double_fpu()) {
 810         __ z_ldy(dest->as_double_reg(), displ, toc_reg);
 811       } else {
 812         assert(dest->is_double_cpu(), "Must be a long register.");
 813         __ z_lg(dest->as_register_lo(), displ, toc_reg);
 814       }
 815     }
 816     break;
 817 
 818     default:
 819       ShouldNotReachHere();
 820   }
 821 }
 822 
 823 Address LIR_Assembler::as_Address(LIR_Address* addr) {
 824   if (addr->base()->is_illegal()) {
 825     Unimplemented();
 826   }
 827 
 828   Register base = addr->base()->as_pointer_register();
 829 
 830   if (addr->index()->is_illegal()) {
 831     return Address(base, addr->disp());
 832   } else if (addr->index()->is_cpu_register()) {
 833     Register index = addr->index()->as_pointer_register();
 834     return Address(base, index, addr->disp());
 835   } else if (addr->index()->is_constant()) {
 836     intptr_t addr_offset = addr->index()->as_constant_ptr()->as_jint() + addr->disp();
 837     return Address(base, addr_offset);
 838   } else {
 839     ShouldNotReachHere();
 840     return Address();
 841   }
 842 }
 843 
 844 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 845   switch (type) {
 846     case T_INT:
 847     case T_FLOAT: {
 848       Register tmp = Z_R1_scratch;
 849       Address from = frame_map()->address_for_slot(src->single_stack_ix());
 850       Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
 851       __ mem2reg_opt(tmp, from, false);
 852       __ reg2mem_opt(tmp, to, false);
 853       break;
 854     }
 855     case T_ADDRESS:
 856     case T_OBJECT: {
 857       Register tmp = Z_R1_scratch;
 858       Address from = frame_map()->address_for_slot(src->single_stack_ix());
 859       Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
 860       __ mem2reg_opt(tmp, from, true);
 861       __ reg2mem_opt(tmp, to, true);
 862       break;
 863     }
 864     case T_LONG:
 865     case T_DOUBLE: {
 866       Register tmp = Z_R1_scratch;
 867       Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
 868       Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
 869       __ mem2reg_opt(tmp, from, true);
 870       __ reg2mem_opt(tmp, to, true);
 871       break;
 872     }
 873 
 874     default:
 875       ShouldNotReachHere();
 876   }
 877 }
 878 
 879 // 4-byte accesses only! Don't use it to access 8 bytes!
 880 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
 881   ShouldNotCallThis();
 882   return Address(); // unused
 883 }
 884 
 885 // 4-byte accesses only! Don't use it to access 8 bytes!
 886 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
 887   ShouldNotCallThis();
 888   return Address(); // unused
 889 }
 890 
 891 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code,
 892                             CodeEmitInfo* info, bool wide) {
 893 
 894   assert(type != T_METADATA, "load of metadata ptr not supported");
 895   LIR_Address* addr = src_opr->as_address_ptr();
 896   LIR_Opr to_reg = dest;
 897 
 898   Register src = addr->base()->as_pointer_register();
 899   Register disp_reg = Z_R0;
 900   int disp_value = addr->disp();
 901   bool needs_patching = (patch_code != lir_patch_none);
 902 
 903   if (addr->base()->type() == T_OBJECT) {
 904     __ verify_oop(src, FILE_AND_LINE);
 905   }
 906 
 907   PatchingStub* patch = nullptr;
 908   if (needs_patching) {
 909     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
 910     assert(!to_reg->is_double_cpu() ||
 911            patch_code == lir_patch_none ||
 912            patch_code == lir_patch_normal, "patching doesn't match register");
 913   }
 914 
 915   if (addr->index()->is_illegal()) {
 916     if (!Immediate::is_simm20(disp_value)) {
 917       if (needs_patching) {
 918         __ load_const(Z_R1_scratch, (intptr_t)0);
 919       } else {
 920         __ load_const_optimized(Z_R1_scratch, disp_value);
 921       }
 922       disp_reg = Z_R1_scratch;
 923       disp_value = 0;
 924     }
 925   } else {
 926     if (!Immediate::is_simm20(disp_value)) {
 927       __ load_const_optimized(Z_R1_scratch, disp_value);
 928       __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
 929       disp_reg = Z_R1_scratch;
 930       disp_value = 0;
 931     }
 932     disp_reg = addr->index()->as_pointer_register();
 933   }
 934 
 935   // Remember the offset of the load. The patching_epilog must be done
 936   // before the call to add_debug_info, otherwise the PcDescs don't get
 937   // entered in increasing order.
 938   int offset = code_offset();
 939 
 940   assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");
 941 
 942   bool short_disp = Immediate::is_uimm12(disp_value);
 943 
 944   switch (type) {
 945     case T_BOOLEAN: // fall through
 946     case T_BYTE  :  __ z_lb(dest->as_register(),   disp_value, disp_reg, src); break;
 947     case T_CHAR  :  __ z_llgh(dest->as_register(), disp_value, disp_reg, src); break;
 948     case T_SHORT :
 949       if (short_disp) {
 950                     __ z_lh(dest->as_register(),   disp_value, disp_reg, src);
 951       } else {
 952                     __ z_lhy(dest->as_register(),  disp_value, disp_reg, src);
 953       }
 954       break;
 955     case T_INT   :
 956       if (short_disp) {
 957                     __ z_l(dest->as_register(),    disp_value, disp_reg, src);
 958       } else {
 959                     __ z_ly(dest->as_register(),   disp_value, disp_reg, src);
 960       }
 961       break;
 962     case T_ADDRESS:
 963       __ z_lg(dest->as_register(), disp_value, disp_reg, src);
 964       break;
 965     case T_ARRAY : // fall through
 966     case T_OBJECT:
 967     {
 968       if (UseCompressedOops && !wide) {
 969         __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
 970         __ oop_decoder(dest->as_register(), dest->as_register(), true);
 971       } else {
 972         __ z_lg(dest->as_register(), disp_value, disp_reg, src);
 973       }
 974       __ verify_oop(dest->as_register(), FILE_AND_LINE);
 975       break;
 976     }
 977     case T_FLOAT:
 978       if (short_disp) {
 979                     __ z_le(dest->as_float_reg(),  disp_value, disp_reg, src);
 980       } else {
 981                     __ z_ley(dest->as_float_reg(), disp_value, disp_reg, src);
 982       }
 983       break;
 984     case T_DOUBLE:
 985       if (short_disp) {
 986                     __ z_ld(dest->as_double_reg(),  disp_value, disp_reg, src);
 987       } else {
 988                     __ z_ldy(dest->as_double_reg(), disp_value, disp_reg, src);
 989       }
 990       break;
 991     case T_LONG  :  __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break;
 992     default      : ShouldNotReachHere();
 993   }
 994 
 995   if (patch != nullptr) {
 996     patching_epilog(patch, patch_code, src, info);
 997   }
 998   if (info != nullptr) add_debug_info_for_null_check(offset, info);
 999 }
1000 
1001 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1002   assert(src->is_stack(), "should not call otherwise");
1003   assert(dest->is_register(), "should not call otherwise");
1004 
1005   if (dest->is_single_cpu()) {
1006     if (is_reference_type(type)) {
1007       __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
1008       __ verify_oop(dest->as_register(), FILE_AND_LINE);
1009     } else if (type == T_METADATA || type == T_ADDRESS) {
1010       __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
1011     } else {
1012       __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), false);
1013     }
1014   } else if (dest->is_double_cpu()) {
1015     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix());
1016     __ mem2reg_opt(dest->as_register_lo(), src_addr_LO, true);
1017   } else if (dest->is_single_fpu()) {
1018     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1019     __ mem2freg_opt(dest->as_float_reg(), src_addr, false);
1020   } else if (dest->is_double_fpu()) {
1021     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1022     __ mem2freg_opt(dest->as_double_reg(), src_addr, true);
1023   } else {
1024     ShouldNotReachHere();
1025   }
1026 }
1027 
1028 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1029   assert(src->is_register(), "should not call otherwise");
1030   assert(dest->is_stack(), "should not call otherwise");
1031 
1032   if (src->is_single_cpu()) {
1033     const Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
1034     if (is_reference_type(type)) {
1035       __ verify_oop(src->as_register(), FILE_AND_LINE);
1036       __ reg2mem_opt(src->as_register(), dst, true);
1037     } else if (type == T_METADATA || type == T_ADDRESS) {
1038       __ reg2mem_opt(src->as_register(), dst, true);
1039     } else {
1040       __ reg2mem_opt(src->as_register(), dst, false);
1041     }
1042   } else if (src->is_double_cpu()) {
1043     Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix());
1044     __ reg2mem_opt(src->as_register_lo(), dstLO, true);
1045   } else if (src->is_single_fpu()) {
1046     Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
1047     __ freg2mem_opt(src->as_float_reg(), dst_addr, false);
1048   } else if (src->is_double_fpu()) {
1049     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
1050     __ freg2mem_opt(src->as_double_reg(), dst_addr, true);
1051   } else {
1052     ShouldNotReachHere();
1053   }
1054 }
1055 
1056 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
1057   if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
1058     if (from_reg->is_double_fpu()) {
1059       // double to double moves
1060       assert(to_reg->is_double_fpu(), "should match");
1061       __ z_ldr(to_reg->as_double_reg(), from_reg->as_double_reg());
1062     } else {
1063       // float to float moves
1064       assert(to_reg->is_single_fpu(), "should match");
1065       __ z_ler(to_reg->as_float_reg(), from_reg->as_float_reg());
1066     }
1067   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1068     if (from_reg->is_double_cpu()) {
1069       __ z_lgr(to_reg->as_pointer_register(), from_reg->as_pointer_register());
1070     } else if (to_reg->is_double_cpu()) {
1071       // int to int moves
1072       __ z_lgr(to_reg->as_register_lo(), from_reg->as_register());
1073     } else {
1074       // int to int moves
1075       __ z_lgr(to_reg->as_register(), from_reg->as_register());
1076     }
1077   } else {
1078     ShouldNotReachHere();
1079   }
1080   if (is_reference_type(to_reg->type())) {
1081     __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
1082   }
1083 }
1084 
1085 void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
1086                             LIR_PatchCode patch_code, CodeEmitInfo* info,
1087                             bool wide) {
1088   assert(type != T_METADATA, "store of metadata ptr not supported");
1089   LIR_Address* addr = dest_opr->as_address_ptr();
1090 
1091   Register dest = addr->base()->as_pointer_register();
1092   Register disp_reg = Z_R0;
1093   int disp_value = addr->disp();
1094   bool needs_patching = (patch_code != lir_patch_none);
1095 
1096   if (addr->base()->is_oop_register()) {
1097     __ verify_oop(dest, FILE_AND_LINE);
1098   }
1099 
1100   PatchingStub* patch = nullptr;
1101   if (needs_patching) {
1102     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1103     assert(!from->is_double_cpu() ||
1104            patch_code == lir_patch_none ||
1105            patch_code == lir_patch_normal, "patching doesn't match register");
1106   }
1107 
1108   assert(!needs_patching || (!Immediate::is_simm20(disp_value) && addr->index()->is_illegal()), "assumption");
1109   if (addr->index()->is_illegal()) {
1110     if (!Immediate::is_simm20(disp_value)) {
1111       if (needs_patching) {
1112         __ load_const(Z_R1_scratch, (intptr_t)0);
1113       } else {
1114         __ load_const_optimized(Z_R1_scratch, disp_value);
1115       }
1116       disp_reg = Z_R1_scratch;
1117       disp_value = 0;
1118     }
1119   } else {
1120     if (!Immediate::is_simm20(disp_value)) {
1121       __ load_const_optimized(Z_R1_scratch, disp_value);
1122       __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
1123       disp_reg = Z_R1_scratch;
1124       disp_value = 0;
1125     }
1126     disp_reg = addr->index()->as_pointer_register();
1127   }
1128 
1129   assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");
1130 
1131   if (is_reference_type(type)) {
1132     __ verify_oop(from->as_register(), FILE_AND_LINE);
1133   }
1134 
1135   bool short_disp = Immediate::is_uimm12(disp_value);
1136 
1137   // Remember the offset of the store. The patching_epilog must be done
1138   // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
1139   // entered in increasing order.
1140   int offset = code_offset();
1141   switch (type) {
1142     case T_BOOLEAN: // fall through
1143     case T_BYTE  :
1144       if (short_disp) {
1145                     __ z_stc(from->as_register(),  disp_value, disp_reg, dest);
1146       } else {
1147                     __ z_stcy(from->as_register(), disp_value, disp_reg, dest);
1148       }
1149       break;
1150     case T_CHAR  : // fall through
1151     case T_SHORT :
1152       if (short_disp) {
1153                     __ z_sth(from->as_register(),  disp_value, disp_reg, dest);
1154       } else {
1155                     __ z_sthy(from->as_register(), disp_value, disp_reg, dest);
1156       }
1157       break;
1158     case T_INT   :
1159       if (short_disp) {
1160                     __ z_st(from->as_register(),  disp_value, disp_reg, dest);
1161       } else {
1162                     __ z_sty(from->as_register(), disp_value, disp_reg, dest);
1163       }
1164       break;
1165     case T_LONG  :  __ z_stg(from->as_register_lo(), disp_value, disp_reg, dest); break;
1166     case T_ADDRESS: __ z_stg(from->as_register(),    disp_value, disp_reg, dest); break;
1167       break;
1168     case T_ARRAY : // fall through
1169     case T_OBJECT:
1170       {
1171         if (UseCompressedOops && !wide) {
1172           Register compressed_src = Z_R14;
1173           __ oop_encoder(compressed_src, from->as_register(), true, (disp_reg != Z_R1) ? Z_R1 : Z_R0, -1, true);
1174           offset = code_offset();
1175           if (short_disp) {
1176             __ z_st(compressed_src,  disp_value, disp_reg, dest);
1177           } else {
1178             __ z_sty(compressed_src, disp_value, disp_reg, dest);
1179           }
1180         } else {
1181           __ z_stg(from->as_register(), disp_value, disp_reg, dest);
1182         }
1183         break;
1184       }
1185     case T_FLOAT :
1186       if (short_disp) {
1187         __ z_ste(from->as_float_reg(),  disp_value, disp_reg, dest);
1188       } else {
1189         __ z_stey(from->as_float_reg(), disp_value, disp_reg, dest);
1190       }
1191       break;
1192     case T_DOUBLE:
1193       if (short_disp) {
1194         __ z_std(from->as_double_reg(),  disp_value, disp_reg, dest);
1195       } else {
1196         __ z_stdy(from->as_double_reg(), disp_value, disp_reg, dest);
1197       }
1198       break;
1199     default: ShouldNotReachHere();
1200   }
1201 
1202   if (patch != nullptr) {
1203     patching_epilog(patch, patch_code, dest, info);
1204   }
1205 
1206   if (info != nullptr) add_debug_info_for_null_check(offset, info);
1207 }
1208 
1209 
1210 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
1211   assert(result->is_illegal() ||
1212          (result->is_single_cpu() && result->as_register() == Z_R2) ||
1213          (result->is_double_cpu() && result->as_register_lo() == Z_R2) ||
1214          (result->is_single_fpu() && result->as_float_reg() == Z_F0) ||
1215          (result->is_double_fpu() && result->as_double_reg() == Z_F0), "convention");
1216 
1217   __ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::polling_page_offset()));
1218 
1219   // Pop the frame before the safepoint code.
1220   __ pop_frame_restore_retPC(initial_frame_size_in_bytes());
1221 
1222   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
1223     __ reserved_stack_check(Z_R14);
1224   }
1225 
1226   // We need to mark the code position where the load from the safepoint
1227   // polling page was emitted as relocInfo::poll_return_type here.
1228   __ relocate(relocInfo::poll_return_type);
1229   __ load_from_polling_page(Z_R1_scratch);
1230 
1231   __ z_br(Z_R14); // Return to caller.
1232 }
1233 
1234 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
1235   const Register poll_addr = tmp->as_register_lo();
1236   __ z_lg(poll_addr, Address(Z_thread, JavaThread::polling_page_offset()));
1237   guarantee(info != nullptr, "Shouldn't be null");
1238   add_debug_info_for_branch(info);
1239   int offset = __ offset();
1240   __ relocate(relocInfo::poll_type);
1241   __ load_from_polling_page(poll_addr);
1242   return offset;
1243 }
1244 
1245 void LIR_Assembler::emit_static_call_stub() {
1246 
1247   // Stub is fixed up when the corresponding call is converted from calling
1248   // compiled code to calling interpreted code.
1249 
1250   address call_pc = __ pc();
1251   address stub = __ start_a_stub(call_stub_size());
1252   if (stub == nullptr) {
1253     bailout("static call stub overflow");
1254     return;
1255   }
1256 
1257   int start = __ offset();
1258 
1259   __ relocate(static_stub_Relocation::spec(call_pc));
1260 
1261   // See also Matcher::interpreter_method_reg().
1262   AddressLiteral meta = __ allocate_metadata_address(nullptr);
1263   bool success = __ load_const_from_toc(Z_method, meta);
1264 
1265   __ set_inst_mark();
1266   AddressLiteral a((address)-1);
1267   success = success && __ load_const_from_toc(Z_R1, a);
1268   if (!success) {
1269     bailout("const section overflow");
1270     return;
1271   }
1272 
1273   __ z_br(Z_R1);
1274   assert(__ offset() - start <= call_stub_size(), "stub too big");
1275   __ end_a_stub(); // Update current stubs pointer and restore insts_end.
1276 }
1277 
1278 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1279   bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual;
1280   if (opr1->is_single_cpu()) {
1281     Register reg1 = opr1->as_register();
1282     if (opr2->is_single_cpu()) {
1283       // cpu register - cpu register
1284       if (is_reference_type(opr1->type())) {
1285         __ z_clgr(reg1, opr2->as_register());
1286       } else {
1287         assert(!is_reference_type(opr2->type()), "cmp int, oop?");
1288         if (unsigned_comp) {
1289           __ z_clr(reg1, opr2->as_register());
1290         } else {
1291           __ z_cr(reg1, opr2->as_register());
1292         }
1293       }
1294     } else if (opr2->is_stack()) {
1295       // cpu register - stack
1296       if (is_reference_type(opr1->type())) {
1297         __ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1298       } else {
1299         if (unsigned_comp) {
1300           __ z_cly(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1301         } else {
1302           __ z_cy(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1303         }
1304       }
1305     } else if (opr2->is_constant()) {
1306       // cpu register - constant
1307       LIR_Const* c = opr2->as_constant_ptr();
1308       if (c->type() == T_INT) {
1309         if (unsigned_comp) {
1310           __ z_clfi(reg1, c->as_jint());
1311         } else {
1312           __ z_cfi(reg1, c->as_jint());
1313         }
1314       } else if (c->type() == T_METADATA) {
1315         // We only need, for now, comparison with null for metadata.
1316         assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
1317         Metadata* m = c->as_metadata();
1318         if (m == nullptr) {
1319           __ z_cghi(reg1, 0);
1320         } else {
1321           ShouldNotReachHere();
1322         }
1323       } else if (is_reference_type(c->type())) {
1324         // In 64bit oops are single register.
1325         jobject o = c->as_jobject();
1326         if (o == nullptr) {
1327           __ z_ltgr(reg1, reg1);
1328         } else {
1329           jobject2reg(o, Z_R1_scratch);
1330           __ z_cgr(reg1, Z_R1_scratch);
1331         }
1332       } else {
1333         fatal("unexpected type: %s", basictype_to_str(c->type()));
1334       }
1335       // cpu register - address
1336     } else if (opr2->is_address()) {
1337       if (op->info() != nullptr) {
1338         add_debug_info_for_null_check_here(op->info());
1339       }
1340       if (unsigned_comp) {
1341         __ z_cly(reg1, as_Address(opr2->as_address_ptr()));
1342       } else {
1343         __ z_cy(reg1, as_Address(opr2->as_address_ptr()));
1344       }
1345     } else {
1346       ShouldNotReachHere();
1347     }
1348 
1349   } else if (opr1->is_double_cpu()) {
1350     assert(!unsigned_comp, "unexpected");
1351     Register xlo = opr1->as_register_lo();
1352     Register xhi = opr1->as_register_hi();
1353     if (opr2->is_double_cpu()) {
1354       __ z_cgr(xlo, opr2->as_register_lo());
1355     } else if (opr2->is_constant()) {
1356       // cpu register - constant 0
1357       assert(opr2->as_jlong() == (jlong)0, "only handles zero");
1358       __ z_ltgr(xlo, xlo);
1359     } else {
1360       ShouldNotReachHere();
1361     }
1362 
1363   } else if (opr1->is_single_fpu()) {
1364     if (opr2->is_single_fpu()) {
1365       __ z_cebr(opr1->as_float_reg(), opr2->as_float_reg());
1366     } else {
1367       // stack slot
1368       Address addr = frame_map()->address_for_slot(opr2->single_stack_ix());
1369       if (Immediate::is_uimm12(addr.disp())) {
1370         __ z_ceb(opr1->as_float_reg(), addr);
1371       } else {
1372         __ z_ley(Z_fscratch_1, addr);
1373         __ z_cebr(opr1->as_float_reg(), Z_fscratch_1);
1374       }
1375     }
1376   } else if (opr1->is_double_fpu()) {
1377     if (opr2->is_double_fpu()) {
1378     __ z_cdbr(opr1->as_double_reg(), opr2->as_double_reg());
1379     } else {
1380       // stack slot
1381       Address addr = frame_map()->address_for_slot(opr2->double_stack_ix());
1382       if (Immediate::is_uimm12(addr.disp())) {
1383         __ z_cdb(opr1->as_double_reg(), addr);
1384       } else {
1385         __ z_ldy(Z_fscratch_1, addr);
1386         __ z_cdbr(opr1->as_double_reg(), Z_fscratch_1);
1387       }
1388     }
1389   } else {
1390     ShouldNotReachHere();
1391   }
1392 }
1393 
1394 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
1395   Label    done;
1396   Register dreg = dst->as_register();
1397 
1398   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1399     assert((left->is_single_fpu() && right->is_single_fpu()) ||
1400            (left->is_double_fpu() && right->is_double_fpu()), "unexpected operand types");
1401     bool is_single = left->is_single_fpu();
1402     bool is_unordered_less = (code == lir_ucmp_fd2i);
1403     FloatRegister lreg = is_single ? left->as_float_reg() : left->as_double_reg();
1404     FloatRegister rreg = is_single ? right->as_float_reg() : right->as_double_reg();
1405     if (is_single) {
1406       __ z_cebr(lreg, rreg);
1407     } else {
1408       __ z_cdbr(lreg, rreg);
1409     }
1410     if (VM_Version::has_LoadStoreConditional()) {
1411       Register one       = Z_R0_scratch;
1412       Register minus_one = Z_R1_scratch;
1413       __ z_lghi(minus_one, -1);
1414       __ z_lghi(one,  1);
1415       __ z_lghi(dreg, 0);
1416       __ z_locgr(dreg, one,       is_unordered_less ? Assembler::bcondHigh            : Assembler::bcondHighOrNotOrdered);
1417       __ z_locgr(dreg, minus_one, is_unordered_less ? Assembler::bcondLowOrNotOrdered : Assembler::bcondLow);
1418     } else {
1419       __ clear_reg(dreg, true, false);
1420       __ z_bre(done); // if (left == right) dst = 0
1421 
1422       // if (left > right || ((code ~= cmpg) && (left <> right)) dst := 1
1423       __ z_lhi(dreg, 1);
1424       __ z_brc(is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered, done);
1425 
1426       // if (left < right || ((code ~= cmpl) && (left <> right)) dst := -1
1427       __ z_lhi(dreg, -1);
1428     }
1429   } else {
1430     assert(code == lir_cmp_l2i, "check");
1431     if (VM_Version::has_LoadStoreConditional()) {
1432       Register one       = Z_R0_scratch;
1433       Register minus_one = Z_R1_scratch;
1434       __ z_cgr(left->as_register_lo(), right->as_register_lo());
1435       __ z_lghi(minus_one, -1);
1436       __ z_lghi(one,  1);
1437       __ z_lghi(dreg, 0);
1438       __ z_locgr(dreg, one, Assembler::bcondHigh);
1439       __ z_locgr(dreg, minus_one, Assembler::bcondLow);
1440     } else {
1441       __ z_cgr(left->as_register_lo(), right->as_register_lo());
1442       __ z_lghi(dreg,  0);     // eq value
1443       __ z_bre(done);
1444       __ z_lghi(dreg,  1);     // gt value
1445       __ z_brh(done);
1446       __ z_lghi(dreg, -1);     // lt value
1447     }
1448   }
1449   __ bind(done);
1450 }
1451 
1452 // result = condition ? opr1 : opr2
1453 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
1454                           LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
1455   assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on s390");
1456 
1457   Assembler::branch_condition acond = Assembler::bcondEqual, ncond = Assembler::bcondNotEqual;
1458   switch (condition) {
1459     case lir_cond_equal:        acond = Assembler::bcondEqual;    ncond = Assembler::bcondNotEqual; break;
1460     case lir_cond_notEqual:     acond = Assembler::bcondNotEqual; ncond = Assembler::bcondEqual;    break;
1461     case lir_cond_less:         acond = Assembler::bcondLow;      ncond = Assembler::bcondNotLow;   break;
1462     case lir_cond_lessEqual:    acond = Assembler::bcondNotHigh;  ncond = Assembler::bcondHigh;     break;
1463     case lir_cond_greaterEqual: acond = Assembler::bcondNotLow;   ncond = Assembler::bcondLow;      break;
1464     case lir_cond_greater:      acond = Assembler::bcondHigh;     ncond = Assembler::bcondNotHigh;  break;
1465     case lir_cond_belowEqual:   acond = Assembler::bcondNotHigh;  ncond = Assembler::bcondHigh;     break;
1466     case lir_cond_aboveEqual:   acond = Assembler::bcondNotLow;   ncond = Assembler::bcondLow;      break;
1467     default:                    ShouldNotReachHere();
1468   }
1469 
1470   if (opr1->is_cpu_register()) {
1471     reg2reg(opr1, result);
1472   } else if (opr1->is_stack()) {
1473     stack2reg(opr1, result, result->type());
1474   } else if (opr1->is_constant()) {
1475     const2reg(opr1, result, lir_patch_none, nullptr);
1476   } else {
1477     ShouldNotReachHere();
1478   }
1479 
1480   if (VM_Version::has_LoadStoreConditional() && !opr2->is_constant()) {
1481     // Optimized version that does not require a branch.
1482     if (opr2->is_single_cpu()) {
1483       assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
1484       __ z_locgr(result->as_register(), opr2->as_register(), ncond);
1485     } else if (opr2->is_double_cpu()) {
1486       assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1487       assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
1488       __ z_locgr(result->as_register_lo(), opr2->as_register_lo(), ncond);
1489     } else if (opr2->is_single_stack()) {
1490       __ z_loc(result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()), ncond);
1491     } else if (opr2->is_double_stack()) {
1492       __ z_locg(result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix()), ncond);
1493     } else {
1494       ShouldNotReachHere();
1495     }
1496   } else {
1497     Label skip;
1498     __ z_brc(acond, skip);
1499     if (opr2->is_cpu_register()) {
1500       reg2reg(opr2, result);
1501     } else if (opr2->is_stack()) {
1502       stack2reg(opr2, result, result->type());
1503     } else if (opr2->is_constant()) {
1504       const2reg(opr2, result, lir_patch_none, nullptr);
1505     } else {
1506       ShouldNotReachHere();
1507     }
1508     __ bind(skip);
1509   }
1510 }
1511 
1512 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
1513                              CodeEmitInfo* info) {
1514   assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1515 
1516   if (left->is_single_cpu()) {
1517     assert(left == dest, "left and dest must be equal");
1518     Register lreg = left->as_register();
1519 
1520     if (right->is_single_cpu()) {
1521       // cpu register - cpu register
1522       Register rreg = right->as_register();
1523       switch (code) {
1524         case lir_add: __ z_ar (lreg, rreg); break;
1525         case lir_sub: __ z_sr (lreg, rreg); break;
1526         case lir_mul: __ z_msr(lreg, rreg); break;
1527         default: ShouldNotReachHere();
1528       }
1529 
1530     } else if (right->is_stack()) {
1531       // cpu register - stack
1532       Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1533       switch (code) {
1534         case lir_add: __ z_ay(lreg, raddr); break;
1535         case lir_sub: __ z_sy(lreg, raddr); break;
1536         default: ShouldNotReachHere();
1537       }
1538 
1539     } else if (right->is_constant()) {
1540       // cpu register - constant
1541       jint c = right->as_constant_ptr()->as_jint();
1542       switch (code) {
1543         case lir_add:
1544                       __ add2reg_32(lreg, c);
1545                       break;
1546         case lir_sub:
1547                       __ add2reg_32(lreg, java_negate(c));
1548                       break;
1549         case lir_mul: __ z_msfi(lreg, c);  break;
1550         default: ShouldNotReachHere();
1551       }
1552 
1553     } else {
1554       ShouldNotReachHere();
1555     }
1556 
1557   } else if (left->is_double_cpu()) {
1558     assert(left == dest, "left and dest must be equal");
1559     Register lreg_lo = left->as_register_lo();
1560     Register lreg_hi = left->as_register_hi();
1561 
1562     if (right->is_double_cpu()) {
1563       // cpu register - cpu register
1564       Register rreg_lo = right->as_register_lo();
1565       Register rreg_hi = right->as_register_hi();
1566       assert_different_registers(lreg_lo, rreg_lo);
1567       switch (code) {
1568         case lir_add:
1569           __ z_agr(lreg_lo, rreg_lo);
1570           break;
1571         case lir_sub:
1572           __ z_sgr(lreg_lo, rreg_lo);
1573           break;
1574         case lir_mul:
1575           __ z_msgr(lreg_lo, rreg_lo);
1576           break;
1577         default:
1578           ShouldNotReachHere();
1579       }
1580 
1581     } else if (right->is_constant()) {
1582       // cpu register - constant
1583       jlong c = right->as_constant_ptr()->as_jlong_bits();
1584       switch (code) {
1585         case lir_add: __ z_agfi(lreg_lo, c); break;
1586         case lir_sub:
1587           if (c != min_jint) {
1588                       __ z_agfi(lreg_lo, -c);
1589           } else {
1590             // -min_jint cannot be represented as simm32 in z_agfi
1591             // min_jint sign extended:      0xffffffff80000000
1592             // -min_jint as 64 bit integer: 0x0000000080000000
1593             // 0x80000000 can be represented as uimm32 in z_algfi
1594             // lreg_lo := lreg_lo + -min_jint == lreg_lo + 0x80000000
1595                       __ z_algfi(lreg_lo, UCONST64(0x80000000));
1596           }
1597           break;
1598         case lir_mul: __ z_msgfi(lreg_lo, c); break;
1599         default:
1600           ShouldNotReachHere();
1601       }
1602 
1603     } else {
1604       ShouldNotReachHere();
1605     }
1606 
1607   } else if (left->is_single_fpu()) {
1608     assert(left == dest, "left and dest must be equal");
1609     FloatRegister lreg = left->as_float_reg();
1610     FloatRegister rreg = right->is_single_fpu() ? right->as_float_reg() : fnoreg;
1611     Address raddr;
1612 
1613     if (rreg == fnoreg) {
1614       assert(right->is_single_stack(), "constants should be loaded into register");
1615       raddr = frame_map()->address_for_slot(right->single_stack_ix());
1616       if (!Immediate::is_uimm12(raddr.disp())) {
1617         __ mem2freg_opt(rreg = Z_fscratch_1, raddr, false);
1618       }
1619     }
1620 
1621     if (rreg != fnoreg) {
1622       switch (code) {
1623         case lir_add: __ z_aebr(lreg, rreg);  break;
1624         case lir_sub: __ z_sebr(lreg, rreg);  break;
1625         case lir_mul: __ z_meebr(lreg, rreg); break;
1626         case lir_div: __ z_debr(lreg, rreg);  break;
1627         default: ShouldNotReachHere();
1628       }
1629     } else {
1630       switch (code) {
1631         case lir_add: __ z_aeb(lreg, raddr);  break;
1632         case lir_sub: __ z_seb(lreg, raddr);  break;
1633         case lir_mul: __ z_meeb(lreg, raddr);  break;
1634         case lir_div: __ z_deb(lreg, raddr);  break;
1635         default: ShouldNotReachHere();
1636       }
1637     }
1638   } else if (left->is_double_fpu()) {
1639     assert(left == dest, "left and dest must be equal");
1640     FloatRegister lreg = left->as_double_reg();
1641     FloatRegister rreg = right->is_double_fpu() ? right->as_double_reg() : fnoreg;
1642     Address raddr;
1643 
1644     if (rreg == fnoreg) {
1645       assert(right->is_double_stack(), "constants should be loaded into register");
1646       raddr = frame_map()->address_for_slot(right->double_stack_ix());
1647       if (!Immediate::is_uimm12(raddr.disp())) {
1648         __ mem2freg_opt(rreg = Z_fscratch_1, raddr, true);
1649       }
1650     }
1651 
1652     if (rreg != fnoreg) {
1653       switch (code) {
1654         case lir_add: __ z_adbr(lreg, rreg); break;
1655         case lir_sub: __ z_sdbr(lreg, rreg); break;
1656         case lir_mul: __ z_mdbr(lreg, rreg); break;
1657         case lir_div: __ z_ddbr(lreg, rreg); break;
1658         default: ShouldNotReachHere();
1659       }
1660     } else {
1661       switch (code) {
1662         case lir_add: __ z_adb(lreg, raddr); break;
1663         case lir_sub: __ z_sdb(lreg, raddr); break;
1664         case lir_mul: __ z_mdb(lreg, raddr); break;
1665         case lir_div: __ z_ddb(lreg, raddr); break;
1666         default: ShouldNotReachHere();
1667       }
1668     }
1669   } else if (left->is_address()) {
1670     assert(left == dest, "left and dest must be equal");
1671     assert(code == lir_add, "unsupported operation");
1672     assert(right->is_constant(), "unsupported operand");
1673     jint c = right->as_constant_ptr()->as_jint();
1674     LIR_Address* lir_addr = left->as_address_ptr();
1675     Address addr = as_Address(lir_addr);
1676     switch (lir_addr->type()) {
1677       case T_INT:
1678         __ add2mem_32(addr, c, Z_R1_scratch);
1679         break;
1680       case T_LONG:
1681         __ add2mem_64(addr, c, Z_R1_scratch);
1682         break;
1683       default:
1684         ShouldNotReachHere();
1685     }
1686   } else {
1687     ShouldNotReachHere();
1688   }
1689 }
1690 
1691 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
1692   switch (code) {
1693     case lir_sqrt: {
1694       assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
1695       FloatRegister src_reg = value->as_double_reg();
1696       FloatRegister dst_reg = dest->as_double_reg();
1697       __ z_sqdbr(dst_reg, src_reg);
1698       break;
1699     }
1700     case lir_abs: {
1701       assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
1702       FloatRegister src_reg = value->as_double_reg();
1703       FloatRegister dst_reg = dest->as_double_reg();
1704       __ z_lpdbr(dst_reg, src_reg);
1705       break;
1706     }
1707     default: {
1708       ShouldNotReachHere();
1709       break;
1710     }
1711   }
1712 }
1713 
1714 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1715   if (left->is_single_cpu()) {
1716     Register reg = left->as_register();
1717     if (right->is_constant()) {
1718       int val = right->as_constant_ptr()->as_jint();
1719       switch (code) {
1720         case lir_logic_and: __ z_nilf(reg, val); break;
1721         case lir_logic_or:  __ z_oilf(reg, val); break;
1722         case lir_logic_xor: __ z_xilf(reg, val); break;
1723         default: ShouldNotReachHere();
1724       }
1725     } else if (right->is_stack()) {
1726       Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
1727       switch (code) {
1728         case lir_logic_and: __ z_ny(reg, raddr); break;
1729         case lir_logic_or:  __ z_oy(reg, raddr); break;
1730         case lir_logic_xor: __ z_xy(reg, raddr); break;
1731         default: ShouldNotReachHere();
1732       }
1733     } else {
1734       Register rright = right->as_register();
1735       switch (code) {
1736         case lir_logic_and: __ z_nr(reg, rright); break;
1737         case lir_logic_or : __ z_or(reg, rright); break;
1738         case lir_logic_xor: __ z_xr(reg, rright); break;
1739         default: ShouldNotReachHere();
1740       }
1741     }
1742     move_regs(reg, dst->as_register());
1743   } else {
1744     Register l_lo = left->as_register_lo();
1745     if (right->is_constant()) {
1746       __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong());
1747       switch (code) {
1748         case lir_logic_and:
1749           __ z_ngr(l_lo, Z_R1_scratch);
1750           break;
1751         case lir_logic_or:
1752           __ z_ogr(l_lo, Z_R1_scratch);
1753           break;
1754         case lir_logic_xor:
1755           __ z_xgr(l_lo, Z_R1_scratch);
1756           break;
1757         default: ShouldNotReachHere();
1758       }
1759     } else {
1760       Register r_lo;
1761       if (is_reference_type(right->type())) {
1762         r_lo = right->as_register();
1763       } else {
1764         r_lo = right->as_register_lo();
1765       }
1766       switch (code) {
1767         case lir_logic_and:
1768           __ z_ngr(l_lo, r_lo);
1769           break;
1770         case lir_logic_or:
1771           __ z_ogr(l_lo, r_lo);
1772           break;
1773         case lir_logic_xor:
1774           __ z_xgr(l_lo, r_lo);
1775           break;
1776         default: ShouldNotReachHere();
1777       }
1778     }
1779 
1780     Register dst_lo = dst->as_register_lo();
1781 
1782     move_regs(l_lo, dst_lo);
1783   }
1784 }
1785 
1786 // See operand selection in LIRGenerator::do_ArithmeticOp_Int().
1787 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
1788   if (left->is_double_cpu()) {
1789     // 64 bit integer case
1790     assert(left->is_double_cpu(), "left must be register");
1791     assert(right->is_double_cpu() || is_power_of_2(right->as_jlong()),
1792            "right must be register or power of 2 constant");
1793     assert(result->is_double_cpu(), "result must be register");
1794 
1795     Register lreg = left->as_register_lo();
1796     Register dreg = result->as_register_lo();
1797 
1798     if (right->is_constant()) {
1799       // Convert division by a power of two into some shifts and logical operations.
1800       Register treg1 = Z_R0_scratch;
1801       Register treg2 = Z_R1_scratch;
1802       jlong divisor = right->as_jlong();
1803       jlong log_divisor = log2i_exact(right->as_jlong());
1804 
1805       if (divisor == min_jlong) {
1806         // Min_jlong is special. Result is '0' except for min_jlong/min_jlong = 1.
1807         if (dreg == lreg) {
1808           NearLabel done;
1809           __ load_const_optimized(treg2, min_jlong);
1810           __ z_cgr(lreg, treg2);
1811           __ z_lghi(dreg, 0);           // Preserves condition code.
1812           __ z_brne(done);
1813           __ z_lghi(dreg, 1);           // min_jlong / min_jlong = 1
1814           __ bind(done);
1815         } else {
1816           assert_different_registers(dreg, lreg);
1817           NearLabel done;
1818           __ z_lghi(dreg, 0);
1819           __ compare64_and_branch(lreg, min_jlong, Assembler::bcondNotEqual, done);
1820           __ z_lghi(dreg, 1);
1821           __ bind(done);
1822         }
1823         return;
1824       }
1825       __ move_reg_if_needed(dreg, T_LONG, lreg, T_LONG);
1826       if (divisor == 2) {
1827         __ z_srlg(treg2, dreg, 63);     // dividend < 0 ? 1 : 0
1828       } else {
1829         __ z_srag(treg2, dreg, 63);     // dividend < 0 ? -1 : 0
1830         __ and_imm(treg2, divisor - 1, treg1, true);
1831       }
1832       if (code == lir_idiv) {
1833         __ z_agr(dreg, treg2);
1834         __ z_srag(dreg, dreg, log_divisor);
1835       } else {
1836         assert(code == lir_irem, "check");
1837         __ z_agr(treg2, dreg);
1838         __ and_imm(treg2, ~(divisor - 1), treg1, true);
1839         __ z_sgr(dreg, treg2);
1840       }
1841       return;
1842     }
1843 
1844     // Divisor is not a power of 2 constant.
1845     Register rreg = right->as_register_lo();
1846     Register treg = temp->as_register_lo();
1847     assert(right->is_double_cpu(), "right must be register");
1848     assert(lreg == Z_R11, "see ldivInOpr()");
1849     assert(rreg != lreg, "right register must not be same as left register");
1850     assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) ||
1851            (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see ldivInOpr(), ldivOutOpr(), lremOutOpr()");
1852 
1853     Register R1 = lreg->predecessor();
1854     Register R2 = rreg;
1855     assert(code != lir_idiv || lreg==dreg, "see code below");
1856     if (code == lir_idiv) {
1857       __ z_lcgr(lreg, lreg);
1858     } else {
1859       __ clear_reg(dreg, true, false);
1860     }
1861     NearLabel done;
1862     __ compare64_and_branch(R2, -1, Assembler::bcondEqual, done);
1863     if (code == lir_idiv) {
1864       __ z_lcgr(lreg, lreg); // Revert lcgr above.
1865     }
1866     if (ImplicitDiv0Checks) {
1867       // No debug info because the idiv won't trap.
1868       // Add_debug_info_for_div0 would instantiate another DivByZeroStub,
1869       // which is unnecessary, too.
1870       add_debug_info_for_div0(__ offset(), info);
1871     }
1872     __ z_dsgr(R1, R2);
1873     __ bind(done);
1874     return;
1875   }
1876 
1877   // 32 bit integer case
1878 
1879   assert(left->is_single_cpu(), "left must be register");
1880   assert(right->is_single_cpu() || is_power_of_2(right->as_jint()), "right must be register or power of 2 constant");
1881   assert(result->is_single_cpu(), "result must be register");
1882 
1883   Register lreg = left->as_register();
1884   Register dreg = result->as_register();
1885 
1886   if (right->is_constant()) {
1887     // Convert division by a power of two into some shifts and logical operations.
1888     Register treg1 = Z_R0_scratch;
1889     Register treg2 = Z_R1_scratch;
1890     jlong divisor = right->as_jint();
1891     jlong log_divisor = log2i_exact(right->as_jint());
1892     __ move_reg_if_needed(dreg, T_LONG, lreg, T_INT); // sign extend
1893     if (divisor == 2) {
1894       __ z_srlg(treg2, dreg, 63);     // dividend < 0 ?  1 : 0
1895     } else {
1896       __ z_srag(treg2, dreg, 63);     // dividend < 0 ? -1 : 0
1897       __ and_imm(treg2, divisor - 1, treg1, true);
1898     }
1899     if (code == lir_idiv) {
1900       __ z_agr(dreg, treg2);
1901       __ z_srag(dreg, dreg, log_divisor);
1902     } else {
1903       assert(code == lir_irem, "check");
1904       __ z_agr(treg2, dreg);
1905       __ and_imm(treg2, ~(divisor - 1), treg1, true);
1906       __ z_sgr(dreg, treg2);
1907     }
1908     return;
1909   }
1910 
1911   // Divisor is not a power of 2 constant.
1912   Register rreg = right->as_register();
1913   Register treg = temp->as_register();
1914   assert(right->is_single_cpu(), "right must be register");
1915   assert(lreg == Z_R11, "left register must be rax,");
1916   assert(rreg != lreg, "right register must not be same as left register");
1917   assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10)
1918       || (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see divInOpr(), divOutOpr(), remOutOpr()");
1919 
1920   Register R1 = lreg->predecessor();
1921   Register R2 = rreg;
1922   __ move_reg_if_needed(lreg, T_LONG, lreg, T_INT); // sign extend
1923   if (ImplicitDiv0Checks) {
1924     // No debug info because the idiv won't trap.
1925     // Add_debug_info_for_div0 would instantiate another DivByZeroStub,
1926     // which is unnecessary, too.
1927     add_debug_info_for_div0(__ offset(), info);
1928   }
1929   __ z_dsgfr(R1, R2);
1930 }
1931 
1932 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1933   assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");
1934   assert(exceptionPC->as_register() == Z_EXC_PC, "should match");
1935 
1936   // Exception object is not added to oop map by LinearScan
1937   // (LinearScan assumes that no oops are in fixed registers).
1938   info->add_register_oop(exceptionOop);
1939 
1940   // Reuse the debug info from the safepoint poll for the throw op itself.
1941   __ get_PC(Z_EXC_PC);
1942   add_call_info(__ offset(), info); // for exception handler
1943   address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? StubId::c1_handle_exception_id
1944                                                                     : StubId::c1_handle_exception_nofpu_id);
1945   emit_call_c(stub);
1946 }
1947 
1948 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
1949   assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");
1950 
1951   __ branch_optimized(Assembler::bcondAlways, _unwind_handler_entry);
1952 }
1953 
1954 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
1955   ciArrayKlass* default_type = op->expected_type();
1956   Register src = op->src()->as_register();
1957   Register dst = op->dst()->as_register();
1958   Register src_pos = op->src_pos()->as_register();
1959   Register dst_pos = op->dst_pos()->as_register();
1960   Register length  = op->length()->as_register();
1961   Register tmp = op->tmp()->as_register();
1962 
1963   CodeStub* stub = op->stub();
1964   int flags = op->flags();
1965   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
1966   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
1967 
1968   // If we don't know anything, just go through the generic arraycopy.
1969   if (default_type == nullptr) {
1970     address copyfunc_addr = StubRoutines::generic_arraycopy();
1971 
1972     if (copyfunc_addr == nullptr) {
1973       // Take a slow path for generic arraycopy.
1974       __ branch_optimized(Assembler::bcondAlways, *stub->entry());
1975       __ bind(*stub->continuation());
1976       return;
1977     }
1978 
1979     // Save outgoing arguments in callee saved registers (C convention) in case
1980     // a call to System.arraycopy is needed.
1981     Register callee_saved_src     = Z_R10;
1982     Register callee_saved_src_pos = Z_R11;
1983     Register callee_saved_dst     = Z_R12;
1984     Register callee_saved_dst_pos = Z_R13;
1985     Register callee_saved_length  = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.
1986 
1987     __ lgr_if_needed(callee_saved_src, src);
1988     __ lgr_if_needed(callee_saved_src_pos, src_pos);
1989     __ lgr_if_needed(callee_saved_dst, dst);
1990     __ lgr_if_needed(callee_saved_dst_pos, dst_pos);
1991     __ lgr_if_needed(callee_saved_length, length);
1992 
1993     // C function requires 64 bit values.
1994     __ z_lgfr(src_pos, src_pos);
1995     __ z_lgfr(dst_pos, dst_pos);
1996     __ z_lgfr(length, length);
1997 
1998     // Pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint.
1999 
2000     // The arguments are in the corresponding registers.
2001     assert(Z_ARG1 == src,     "assumption");
2002     assert(Z_ARG2 == src_pos, "assumption");
2003     assert(Z_ARG3 == dst,     "assumption");
2004     assert(Z_ARG4 == dst_pos, "assumption");
2005     assert(Z_ARG5 == length,  "assumption");
2006 #ifndef PRODUCT
2007     if (PrintC1Statistics) {
2008       __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt);
2009       __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
2010     }
2011 #endif
2012     emit_call_c(copyfunc_addr);
2013     CHECK_BAILOUT();
2014 
2015     __ compare32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation());
2016 
2017     __ z_lgr(tmp, Z_RET);
2018     __ z_xilf(tmp, -1);
2019 
2020     // Restore values from callee saved registers so they are where the stub
2021     // expects them.
2022     __ lgr_if_needed(src, callee_saved_src);
2023     __ lgr_if_needed(src_pos, callee_saved_src_pos);
2024     __ lgr_if_needed(dst, callee_saved_dst);
2025     __ lgr_if_needed(dst_pos, callee_saved_dst_pos);
2026     __ lgr_if_needed(length, callee_saved_length);
2027 
2028     __ z_sr(length, tmp);
2029     __ z_ar(src_pos, tmp);
2030     __ z_ar(dst_pos, tmp);
2031     __ branch_optimized(Assembler::bcondAlways, *stub->entry());
2032 
2033     __ bind(*stub->continuation());
2034     return;
2035   }
2036 
2037   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2038 
2039   int elem_size = type2aelembytes(basic_type);
2040   int shift_amount;
2041 
2042   switch (elem_size) {
2043     case 1 :
2044       shift_amount = 0;
2045       break;
2046     case 2 :
2047       shift_amount = 1;
2048       break;
2049     case 4 :
2050       shift_amount = 2;
2051       break;
2052     case 8 :
2053       shift_amount = 3;
2054       break;
2055     default:
2056       shift_amount = -1;
2057       ShouldNotReachHere();
2058   }
2059 
2060   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2061   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2062 
2063   // Length and pos's are all sign extended at this point on 64bit.
2064 
2065   // test for null
2066   if (flags & LIR_OpArrayCopy::src_null_check) {
2067     __ compareU64_and_branch(src, (intptr_t)0, Assembler::bcondZero, *stub->entry());
2068   }
2069   if (flags & LIR_OpArrayCopy::dst_null_check) {
2070     __ compareU64_and_branch(dst, (intptr_t)0, Assembler::bcondZero, *stub->entry());
2071   }
2072 
2073   // Check if negative.
2074   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2075     __ compare32_and_branch(src_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry());
2076   }
2077   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2078     __ compare32_and_branch(dst_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry());
2079   }
2080 
2081   // If the compiler was not able to prove that exact type of the source or the destination
2082   // of the arraycopy is an array type, check at runtime if the source or the destination is
2083   // an instance type.
2084   if (flags & LIR_OpArrayCopy::type_check) {
2085     assert(Klass::_lh_neutral_value == 0, "or replace z_lt instructions");
2086 
2087     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2088       __ load_klass(tmp, dst);
2089       __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2090       __ branch_optimized(Assembler::bcondNotLow, *stub->entry());
2091     }
2092 
2093     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2094       __ load_klass(tmp, src);
2095       __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2096       __ branch_optimized(Assembler::bcondNotLow, *stub->entry());
2097     }
2098   }
2099 
2100   if (flags & LIR_OpArrayCopy::src_range_check) {
2101     __ z_la(tmp, Address(src_pos, length));
2102     __ z_cl(tmp, src_length_addr);
2103     __ branch_optimized(Assembler::bcondHigh, *stub->entry());
2104   }
2105   if (flags & LIR_OpArrayCopy::dst_range_check) {
2106     __ z_la(tmp, Address(dst_pos, length));
2107     __ z_cl(tmp, dst_length_addr);
2108     __ branch_optimized(Assembler::bcondHigh, *stub->entry());
2109   }
2110 
2111   if (flags & LIR_OpArrayCopy::length_positive_check) {
2112     __ z_ltr(length, length);
2113     __ branch_optimized(Assembler::bcondNegative, *stub->entry());
2114   }
2115 
2116   // Stubs require 64 bit values.
2117   __ z_lgfr(src_pos, src_pos); // int -> long
2118   __ z_lgfr(dst_pos, dst_pos); // int -> long
2119   __ z_lgfr(length, length);   // int -> long
2120 
2121   if (flags & LIR_OpArrayCopy::type_check) {
2122     // We don't know the array types are compatible.
2123     if (basic_type != T_OBJECT) {
2124       // Simple test for basic type arrays.
2125       __ cmp_klasses_from_objects(src, dst, tmp, Z_R1_scratch);
2126       __ branch_optimized(Assembler::bcondNotEqual, *stub->entry());
2127     } else {
2128       // For object arrays, if src is a sub class of dst then we can
2129       // safely do the copy.
2130       NearLabel cont, slow;
2131       Register src_klass = Z_R1_scratch;
2132       Register dst_klass = Z_R10;
2133 
2134       __ load_klass(src_klass, src);
2135       __ load_klass(dst_klass, dst);
2136 
2137       __ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, nullptr);
2138 
2139       store_parameter(src_klass, 0); // sub
2140       store_parameter(dst_klass, 1); // super
2141       emit_call_c(Runtime1::entry_for (StubId::c1_slow_subtype_check_id));
2142       CHECK_BAILOUT2(cont, slow);
2143       // Sets condition code 0 for match (2 otherwise).
2144       __ branch_optimized(Assembler::bcondEqual, cont);
2145 
2146       __ bind(slow);
2147 
2148       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2149       if (copyfunc_addr != nullptr) { // use stub if available
2150         // Src is not a sub class of dst so we have to do a
2151         // per-element check.
2152 
2153         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2154         if ((flags & mask) != mask) {
2155           // Check that at least both of them object arrays.
2156           assert(flags & mask, "one of the two should be known to be an object array");
2157 
2158           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2159             __ load_klass(tmp, src);
2160           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2161             __ load_klass(tmp, dst);
2162           }
2163           Address klass_lh_addr(tmp, Klass::layout_helper_offset());
2164           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2165           __ load_const_optimized(Z_R1_scratch, objArray_lh);
2166           __ z_c(Z_R1_scratch, klass_lh_addr);
2167           __ branch_optimized(Assembler::bcondNotEqual, *stub->entry());
2168         }
2169 
2170         // Save outgoing arguments in callee saved registers (C convention) in case
2171         // a call to System.arraycopy is needed.
2172         Register callee_saved_src     = Z_R10;
2173         Register callee_saved_src_pos = Z_R11;
2174         Register callee_saved_dst     = Z_R12;
2175         Register callee_saved_dst_pos = Z_R13;
2176         Register callee_saved_length  = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.
2177 
2178         __ lgr_if_needed(callee_saved_src, src);
2179         __ lgr_if_needed(callee_saved_src_pos, src_pos);
2180         __ lgr_if_needed(callee_saved_dst, dst);
2181         __ lgr_if_needed(callee_saved_dst_pos, dst_pos);
2182         __ lgr_if_needed(callee_saved_length, length);
2183 
2184         __ z_llgfr(length, length); // Higher 32bits must be null.
2185 
2186         __ z_sllg(Z_ARG1, src_pos, shift_amount); // index -> byte offset
2187         __ z_sllg(Z_ARG2, dst_pos, shift_amount); // index -> byte offset
2188 
2189         __ z_la(Z_ARG1, Address(src, Z_ARG1, arrayOopDesc::base_offset_in_bytes(basic_type)));
2190         assert_different_registers(Z_ARG1, dst, dst_pos, length);
2191         __ z_la(Z_ARG2, Address(dst, Z_ARG2, arrayOopDesc::base_offset_in_bytes(basic_type)));
2192         assert_different_registers(Z_ARG2, dst, length);
2193 
2194         __ z_lgr(Z_ARG3, length);
2195         assert_different_registers(Z_ARG3, dst);
2196 
2197         __ load_klass(Z_ARG5, dst);
2198         __ z_lg(Z_ARG5, Address(Z_ARG5, ObjArrayKlass::element_klass_offset()));
2199         __ z_lg(Z_ARG4, Address(Z_ARG5, Klass::super_check_offset_offset()));
2200         emit_call_c(copyfunc_addr);
2201         CHECK_BAILOUT2(cont, slow);
2202 
2203 #ifndef PRODUCT
2204         if (PrintC1Statistics) {
2205           NearLabel failed;
2206           __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondNotEqual, failed);
2207           __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_cnt);
2208           __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
2209           __ bind(failed);
2210         }
2211 #endif
2212 
2213         __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation());
2214 
2215 #ifndef PRODUCT
2216         if (PrintC1Statistics) {
2217           __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_attempt_cnt);
2218           __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
2219         }
2220 #endif
2221 
2222         __ z_lgr(tmp, Z_RET);
2223         __ z_xilf(tmp, -1);
2224 
2225         // Restore previously spilled arguments
2226         __ lgr_if_needed(src, callee_saved_src);
2227         __ lgr_if_needed(src_pos, callee_saved_src_pos);
2228         __ lgr_if_needed(dst, callee_saved_dst);
2229         __ lgr_if_needed(dst_pos, callee_saved_dst_pos);
2230         __ lgr_if_needed(length, callee_saved_length);
2231 
2232         __ z_sr(length, tmp);
2233         __ z_ar(src_pos, tmp);
2234         __ z_ar(dst_pos, tmp);
2235       }
2236 
2237       __ branch_optimized(Assembler::bcondAlways, *stub->entry());
2238 
2239       __ bind(cont);
2240     }
2241   }
2242 
2243 #ifdef ASSERT
2244   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2245     // Sanity check the known type with the incoming class. For the
2246     // primitive case the types must match exactly with src.klass and
2247     // dst.klass each exactly matching the default type. For the
2248     // object array case, if no type check is needed then either the
2249     // dst type is exactly the expected type and the src type is a
2250     // subtype which we can't check or src is the same array as dst
2251     // but not necessarily exactly of type default_type.
2252     NearLabel known_ok, halt;
2253     metadata2reg(default_type->constant_encoding(), tmp);
2254     if (UseCompressedClassPointers) {
2255       __ encode_klass_not_null(tmp);
2256     }
2257 
2258     if (basic_type != T_OBJECT) {
2259       __ cmp_klass(tmp, dst, Z_R1_scratch);
2260       __ branch_optimized(Assembler::bcondNotEqual, halt);
2261 
2262       __ cmp_klass(tmp, src, Z_R1_scratch);
2263       __ branch_optimized(Assembler::bcondEqual, known_ok);
2264     } else {
2265       __ cmp_klass(tmp, dst, Z_R1_scratch);
2266       __ branch_optimized(Assembler::bcondEqual, known_ok);
2267       __ compareU64_and_branch(src, dst, Assembler::bcondEqual, known_ok);
2268     }
2269     __ bind(halt);
2270     __ stop("incorrect type information in arraycopy");
2271     __ bind(known_ok);
2272   }
2273 #endif
2274 
2275 #ifndef PRODUCT
2276   if (PrintC1Statistics) {
2277     __ load_const_optimized(Z_R1_scratch, Runtime1::arraycopy_count_address(basic_type));
2278     __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
2279   }
2280 #endif
2281 
2282   __ z_sllg(tmp, src_pos, shift_amount); // index -> byte offset
2283   __ z_sllg(Z_R1_scratch, dst_pos, shift_amount); // index -> byte offset
2284 
2285   assert_different_registers(Z_ARG1, dst, dst_pos, length);
2286   __ z_la(Z_ARG1, Address(src, tmp, arrayOopDesc::base_offset_in_bytes(basic_type)));
2287   assert_different_registers(Z_ARG2, length);
2288   __ z_la(Z_ARG2, Address(dst, Z_R1_scratch, arrayOopDesc::base_offset_in_bytes(basic_type)));
2289   __ lgr_if_needed(Z_ARG3, length);
2290 
2291   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2292   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2293   const char *name;
2294   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2295   __ call_VM_leaf(entry);
2296 
2297   if (stub != nullptr) {
2298     __ bind(*stub->continuation());
2299   }
2300 }
2301 
2302 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2303   if (dest->is_single_cpu()) {
2304     if (left->type() == T_OBJECT) {
2305       switch (code) {
2306         case lir_shl:  __ z_sllg (dest->as_register(), left->as_register(), 0, count->as_register()); break;
2307         case lir_shr:  __ z_srag (dest->as_register(), left->as_register(), 0, count->as_register()); break;
2308         case lir_ushr: __ z_srlg (dest->as_register(), left->as_register(), 0, count->as_register()); break;
2309         default: ShouldNotReachHere();
2310       }
2311     } else {
2312       assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts");
2313       Register masked_count = Z_R1_scratch;
2314       __ z_lr(masked_count, count->as_register());
2315       __ z_nill(masked_count, 31);
2316       switch (code) {
2317         case lir_shl:  __ z_sllg (dest->as_register(), left->as_register(), 0, masked_count); break;
2318         case lir_shr:  __ z_sra  (dest->as_register(), 0, masked_count); break;
2319         case lir_ushr: __ z_srl  (dest->as_register(), 0, masked_count); break;
2320         default: ShouldNotReachHere();
2321       }
2322     }
2323   } else {
2324     switch (code) {
2325       case lir_shl:  __ z_sllg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
2326       case lir_shr:  __ z_srag (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
2327       case lir_ushr: __ z_srlg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
2328       default: ShouldNotReachHere();
2329     }
2330   }
2331 }
2332 
2333 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2334   if (left->type() == T_OBJECT) {
2335     count = count & 63;  // Shouldn't shift by more than sizeof(intptr_t).
2336     Register l = left->as_register();
2337     Register d = dest->as_register_lo();
2338     switch (code) {
2339       case lir_shl:  __ z_sllg (d, l, count); break;
2340       case lir_shr:  __ z_srag (d, l, count); break;
2341       case lir_ushr: __ z_srlg (d, l, count); break;
2342       default: ShouldNotReachHere();
2343     }
2344     return;
2345   }
2346   if (dest->is_single_cpu()) {
2347     assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts");
2348     count = count & 0x1F; // Java spec
2349     switch (code) {
2350       case lir_shl:  __ z_sllg (dest->as_register(), left->as_register(), count); break;
2351       case lir_shr:  __ z_sra  (dest->as_register(), count); break;
2352       case lir_ushr: __ z_srl  (dest->as_register(), count); break;
2353       default: ShouldNotReachHere();
2354     }
2355   } else if (dest->is_double_cpu()) {
2356     count = count & 63; // Java spec
2357     Register l = left->as_pointer_register();
2358     Register d = dest->as_pointer_register();
2359     switch (code) {
2360       case lir_shl:  __ z_sllg (d, l, count); break;
2361       case lir_shr:  __ z_srag (d, l, count); break;
2362       case lir_ushr: __ z_srlg (d, l, count); break;
2363       default: ShouldNotReachHere();
2364     }
2365   } else {
2366     ShouldNotReachHere();
2367   }
2368 }
2369 
2370 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2371   if (op->init_check()) {
2372     // Make sure klass is initialized & doesn't have finalizer.
2373     // init_state needs acquire, but S390 is TSO, and so we are already good.
2374     const int state_offset = in_bytes(InstanceKlass::init_state_offset());
2375     Register iklass = op->klass()->as_register();
2376     add_debug_info_for_null_check_here(op->stub()->info());
2377     if (Immediate::is_uimm12(state_offset)) {
2378       __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized);
2379     } else {
2380       __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized);
2381     }
2382     __ branch_optimized(Assembler::bcondNotEqual, *op->stub()->entry()); // Use long branch, because slow_case might be far.
2383   }
2384   __ allocate_object(op->obj()->as_register(),
2385                      op->tmp1()->as_register(),
2386                      op->tmp2()->as_register(),
2387                      op->header_size(),
2388                      op->object_size(),
2389                      op->klass()->as_register(),
2390                      *op->stub()->entry());
2391   __ bind(*op->stub()->continuation());
2392   __ verify_oop(op->obj()->as_register(), FILE_AND_LINE);
2393 }
2394 
2395 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2396   Register len = op->len()->as_register();
2397   __ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend
2398 
2399   if (UseSlowPath ||
2400       (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
2401       (!UseFastNewTypeArray   && (!is_reference_type(op->type())))) {
2402     __ z_brul(*op->stub()->entry());
2403   } else {
2404     __ allocate_array(op->obj()->as_register(),
2405                       op->len()->as_register(),
2406                       op->tmp1()->as_register(),
2407                       op->tmp2()->as_register(),
2408                       arrayOopDesc::base_offset_in_bytes(op->type()),
2409                       type2aelembytes(op->type()),
2410                       op->klass()->as_register(),
2411                       *op->stub()->entry(),
2412                       op->zero_array());
2413   }
2414   __ bind(*op->stub()->continuation());
2415 }
2416 
2417 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
2418                                         Register recv, Register tmp1, Label* update_done) {
2419   uint i;
2420   for (i = 0; i < VirtualCallData::row_limit(); i++) {
2421     Label next_test;
2422     // See if the receiver is receiver[n].
2423     Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
2424     __ z_cg(recv, receiver_addr);
2425     __ z_brne(next_test);
2426     Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
2427     __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
2428     __ branch_optimized(Assembler::bcondAlways, *update_done);
2429     __ bind(next_test);
2430   }
2431 
2432   // Didn't find receiver; find next empty slot and fill it in.
2433   for (i = 0; i < VirtualCallData::row_limit(); i++) {
2434     Label next_test;
2435     Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
2436     __ z_ltg(Z_R0_scratch, recv_addr);
2437     __ z_brne(next_test);
2438     __ z_stg(recv, recv_addr);
2439     __ load_const_optimized(tmp1, DataLayout::counter_increment);
2440     __ z_stg(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)), mdo);
2441     __ branch_optimized(Assembler::bcondAlways, *update_done);
2442     __ bind(next_test);
2443   }
2444 }
2445 
2446 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
2447                                     ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
2448   Unimplemented();
2449 }
2450 
2451 void LIR_Assembler::store_parameter(Register r, int param_num) {
2452   assert(param_num >= 0, "invalid num");
2453   int offset_in_bytes = param_num * BytesPerWord;
2454   check_reserved_argument_area(offset_in_bytes);
2455   offset_in_bytes += FrameMap::first_available_sp_in_frame;
2456   __ z_stg(r, offset_in_bytes, Z_SP);
2457 }
2458 
2459 void LIR_Assembler::store_parameter(jint c, int param_num) {
2460   assert(param_num >= 0, "invalid num");
2461   int offset_in_bytes = param_num * BytesPerWord;
2462   check_reserved_argument_area(offset_in_bytes);
2463   offset_in_bytes += FrameMap::first_available_sp_in_frame;
2464   __ store_const(Address(Z_SP, offset_in_bytes), c, Z_R1_scratch, true);
2465 }
2466 
2467 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2468   // We always need a stub for the failure case.
2469   CodeStub* stub = op->stub();
2470   Register obj = op->object()->as_register();
2471   Register k_RInfo = op->tmp1()->as_register();
2472   Register klass_RInfo = op->tmp2()->as_register();
2473   Register dst = op->result_opr()->as_register();
2474   Register Rtmp1 = Z_R1_scratch;
2475   ciKlass* k = op->klass();
2476 
2477   assert(!op->tmp3()->is_valid(), "tmp3's not needed");
2478 
2479   // Check if it needs to be profiled.
2480   ciMethodData* md = nullptr;
2481   ciProfileData* data = nullptr;
2482 
2483   if (op->should_profile()) {
2484     ciMethod* method = op->profiled_method();
2485     assert(method != nullptr, "Should have method");
2486     int bci = op->profiled_bci();
2487     md = method->method_data_or_null();
2488     assert(md != nullptr, "Sanity");
2489     data = md->bci_to_data(bci);
2490     assert(data != nullptr,                "need data for type check");
2491     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2492   }
2493 
2494   // Temp operands do not overlap with inputs, if this is their last
2495   // use (end of range is exclusive), so a register conflict is possible.
2496   if (obj == k_RInfo) {
2497     k_RInfo = dst;
2498   } else if (obj == klass_RInfo) {
2499     klass_RInfo = dst;
2500   }
2501   assert_different_registers(obj, k_RInfo, klass_RInfo);
2502 
2503   if (op->should_profile()) {
2504     Register mdo = klass_RInfo;
2505     metadata2reg(md->constant_encoding(), mdo);
2506     NearLabel not_null;
2507     __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondNotEqual, not_null);
2508     // Object is null; update MDO and exit.
2509     Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
2510     int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
2511     __ or2mem_8(data_addr, header_bits);
2512     __ branch_optimized(Assembler::bcondAlways, *obj_is_null);
2513     __ bind(not_null);
2514 
2515     NearLabel update_done;
2516     Register recv = k_RInfo;
2517     __ load_klass(recv, obj);
2518     type_profile_helper(mdo, md, data, recv, Rtmp1, &update_done);
2519     Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2520     __ add2mem_64(counter_addr, DataLayout::counter_increment, Rtmp1);
2521     __ bind(update_done);
2522   } else {
2523     __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondEqual, *obj_is_null);
2524   }
2525 
2526   Label *failure_target = failure;
2527   Label *success_target = success;
2528 
2529   // Patching may screw with our temporaries,
2530   // so let's do it before loading the class.
2531   if (k->is_loaded()) {
2532     metadata2reg(k->constant_encoding(), k_RInfo);
2533   } else {
2534     klass2reg_with_patching(k_RInfo, op->info_for_patch());
2535   }
2536   assert(obj != k_RInfo, "must be different");
2537 
2538   __ verify_oop(obj, FILE_AND_LINE);
2539 
2540   // Get object class.
2541   // Not a safepoint as obj null check happens earlier.
2542   if (op->fast_check()) {
2543     if (UseCompressedClassPointers) {
2544       __ load_klass(klass_RInfo, obj);
2545       __ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target);
2546     } else {
2547       __ z_cg(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
2548       __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
2549     }
2550     // Successful cast, fall through to profile or jump.
2551   } else {
2552     bool need_slow_path = !k->is_loaded() ||
2553                           ((int) k->super_check_offset() == in_bytes(Klass::secondary_super_cache_offset()));
2554     __ load_klass(klass_RInfo, obj);
2555     // Perform the fast part of the checking logic.
2556     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1,
2557                                      (need_slow_path ? success_target : nullptr),
2558                                      failure_target, nullptr);
2559     if (need_slow_path) {
2560       // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
2561       address a = Runtime1::entry_for (StubId::c1_slow_subtype_check_id);
2562       store_parameter(klass_RInfo, 0); // sub
2563       store_parameter(k_RInfo, 1);     // super
2564       emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
2565       __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
2566       // Fall through to success case.
2567     }
2568   }
2569 
2570   __ branch_optimized(Assembler::bcondAlways, *success);
2571 }
2572 
2573 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2574   LIR_Code code = op->code();
2575   if (code == lir_store_check) {
2576     Register value = op->object()->as_register();
2577     Register array = op->array()->as_register();
2578     Register k_RInfo = op->tmp1()->as_register();
2579     Register klass_RInfo = op->tmp2()->as_register();
2580     Register Rtmp1 = Z_R1_scratch;
2581 
2582     CodeStub* stub = op->stub();
2583 
2584     // Check if it needs to be profiled.
2585     ciMethodData* md = nullptr;
2586     ciProfileData* data = nullptr;
2587 
2588     assert_different_registers(value, k_RInfo, klass_RInfo);
2589 
2590     if (op->should_profile()) {
2591       ciMethod* method = op->profiled_method();
2592       assert(method != nullptr, "Should have method");
2593       int bci = op->profiled_bci();
2594       md = method->method_data_or_null();
2595       assert(md != nullptr, "Sanity");
2596       data = md->bci_to_data(bci);
2597       assert(data != nullptr,                "need data for type check");
2598       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2599     }
2600     NearLabel done;
2601     Label *success_target = &done;
2602     Label *failure_target = stub->entry();
2603 
2604     if (op->should_profile()) {
2605       Register mdo = klass_RInfo;
2606       metadata2reg(md->constant_encoding(), mdo);
2607       NearLabel not_null;
2608       __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondNotEqual, not_null);
2609       // Object is null; update MDO and exit.
2610       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
2611       int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
2612       __ or2mem_8(data_addr, header_bits);
2613       __ branch_optimized(Assembler::bcondAlways, done);
2614       __ bind(not_null);
2615 
2616       NearLabel update_done;
2617       Register recv = k_RInfo;
2618       __ load_klass(recv, value);
2619       type_profile_helper(mdo, md, data, recv, Rtmp1, &update_done);
2620       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2621       __ add2mem_64(counter_addr, DataLayout::counter_increment, Rtmp1);
2622       __ bind(update_done);
2623     } else {
2624       __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondEqual, done);
2625     }
2626 
2627     add_debug_info_for_null_check_here(op->info_for_exception());
2628     __ load_klass(k_RInfo, array);
2629     __ load_klass(klass_RInfo, value);
2630 
2631     // Get instance klass (it's already uncompressed).
2632     __ z_lg(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
2633     // Perform the fast part of the checking logic.
2634     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
2635     // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
2636     address a = Runtime1::entry_for (StubId::c1_slow_subtype_check_id);
2637     store_parameter(klass_RInfo, 0); // sub
2638     store_parameter(k_RInfo, 1);     // super
2639     emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
2640     __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
2641     // Fall through to success case.
2642 
2643     __ bind(done);
2644   } else {
2645     if (code == lir_checkcast) {
2646       Register obj = op->object()->as_register();
2647       Register dst = op->result_opr()->as_register();
2648       NearLabel success;
2649       emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
2650       __ bind(success);
2651       __ lgr_if_needed(dst, obj);
2652     } else {
2653       if (code == lir_instanceof) {
2654         Register obj = op->object()->as_register();
2655         Register dst = op->result_opr()->as_register();
2656         NearLabel success, failure, done;
2657         emit_typecheck_helper(op, &success, &failure, &failure);
2658         __ bind(failure);
2659         __ clear_reg(dst);
2660         __ branch_optimized(Assembler::bcondAlways, done);
2661         __ bind(success);
2662         __ load_const_optimized(dst, 1);
2663         __ bind(done);
2664       } else {
2665         ShouldNotReachHere();
2666       }
2667     }
2668   }
2669 }
2670 
2671 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2672   Register addr = op->addr()->as_pointer_register();
2673   Register t1_cmp = Z_R1_scratch;
2674   if (op->code() == lir_cas_long) {
2675     Register cmp_value_lo = op->cmp_value()->as_register_lo();
2676     Register new_value_lo = op->new_value()->as_register_lo();
2677     __ z_lgr(t1_cmp, cmp_value_lo);
2678     // Perform the compare and swap operation.
2679     __ z_csg(t1_cmp, new_value_lo, 0, addr);
2680   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2681     Register cmp_value = op->cmp_value()->as_register();
2682     Register new_value = op->new_value()->as_register();
2683     if (op->code() == lir_cas_obj) {
2684       if (UseCompressedOops) {
2685                  t1_cmp = op->tmp1()->as_register();
2686         Register t2_new = op->tmp2()->as_register();
2687         assert_different_registers(cmp_value, new_value, addr, t1_cmp, t2_new);
2688         __ oop_encoder(t1_cmp, cmp_value, true /*maybe null*/);
2689         __ oop_encoder(t2_new, new_value, true /*maybe null*/);
2690         __ z_cs(t1_cmp, t2_new, 0, addr);
2691       } else {
2692         __ z_lgr(t1_cmp, cmp_value);
2693         __ z_csg(t1_cmp, new_value, 0, addr);
2694       }
2695     } else {
2696       __ z_lr(t1_cmp, cmp_value);
2697       __ z_cs(t1_cmp, new_value, 0, addr);
2698     }
2699   } else {
2700     ShouldNotReachHere(); // new lir_cas_??
2701   }
2702 }
2703 
2704 void LIR_Assembler::breakpoint() {
2705   Unimplemented();
2706   //  __ breakpoint_trap();
2707 }
2708 
2709 void LIR_Assembler::push(LIR_Opr opr) {
2710   ShouldNotCallThis(); // unused
2711 }
2712 
2713 void LIR_Assembler::pop(LIR_Opr opr) {
2714   ShouldNotCallThis(); // unused
2715 }
2716 
2717 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2718   Address addr = frame_map()->address_for_monitor_lock(monitor_no);
2719   __ add2reg(dst_opr->as_register(), addr.disp(), addr.base());
2720 }
2721 
2722 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2723   Register obj = op->obj_opr()->as_register();  // May not be an oop.
2724   Register hdr = op->hdr_opr()->as_register();
2725   Register lock = op->lock_opr()->as_register();
2726   if (op->code() == lir_lock) {
2727     // Add debug info for NullPointerException only if one is possible.
2728     if (op->info() != nullptr) {
2729       add_debug_info_for_null_check_here(op->info());
2730     }
2731     __ lock_object(hdr, obj, lock, *op->stub()->entry());
2732     // done
2733   } else if (op->code() == lir_unlock) {
2734     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2735   } else {
2736     ShouldNotReachHere();
2737   }
2738   __ bind(*op->stub()->continuation());
2739 }
2740 
2741 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2742   Register obj = op->obj()->as_pointer_register();
2743   Register result = op->result_opr()->as_pointer_register();
2744 
2745   CodeEmitInfo* info = op->info();
2746   if (info != nullptr) {
2747     add_debug_info_for_null_check_here(info);
2748   }
2749 
2750   __ load_klass(result, obj);
2751 }
2752 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2753   ciMethod* method = op->profiled_method();
2754   int bci          = op->profiled_bci();
2755   ciMethod* callee = op->profiled_callee();
2756 
2757   // Update counter for all call types.
2758   ciMethodData* md = method->method_data_or_null();
2759   assert(md != nullptr, "Sanity");
2760   ciProfileData* data = md->bci_to_data(bci);
2761   assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
2762   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2763   Register mdo  = op->mdo()->as_register();
2764   assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
2765   Register tmp1 = op->tmp1()->as_register_lo();
2766   metadata2reg(md->constant_encoding(), mdo);
2767 
2768   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2769   // Perform additional virtual call profiling for invokevirtual and
2770   // invokeinterface bytecodes
2771   if (op->should_profile_receiver_type()) {
2772     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2773     Register recv = op->recv()->as_register();
2774     assert_different_registers(mdo, tmp1, recv);
2775     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2776     ciKlass* known_klass = op->known_holder();
2777     if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
2778       // We know the type that will be seen at this call site; we can
2779       // statically update the MethodData* rather than needing to do
2780       // dynamic tests on the receiver type.
2781 
2782       // NOTE: we should probably put a lock around this search to
2783       // avoid collisions by concurrent compilations.
2784       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2785       uint i;
2786       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2787         ciKlass* receiver = vc_data->receiver(i);
2788         if (known_klass->equals(receiver)) {
2789           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2790           __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
2791           return;
2792         }
2793       }
2794 
2795       // Receiver type not found in profile data. Select an empty slot.
2796 
2797       // Note that this is less efficient than it should be because it
2798       // always does a write to the receiver part of the
2799       // VirtualCallData rather than just the first time.
2800       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2801         ciKlass* receiver = vc_data->receiver(i);
2802         if (receiver == nullptr) {
2803           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
2804           metadata2reg(known_klass->constant_encoding(), tmp1);
2805           __ z_stg(tmp1, recv_addr);
2806           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
2807           __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
2808           return;
2809         }
2810       }
2811     } else {
2812       __ load_klass(recv, recv);
2813       NearLabel update_done;
2814       type_profile_helper(mdo, md, data, recv, tmp1, &update_done);
2815       // Receiver did not match any saved receiver and there is no empty row for it.
2816       // Increment total counter to indicate polymorphic case.
2817       __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1);
2818       __ bind(update_done);
2819     }
2820   } else {
2821     // static call
2822     __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1);
2823   }
2824 }
2825 
2826 void LIR_Assembler::align_backward_branch_target() {
2827   __ align(OptoLoopAlignment);
2828 }
2829 
2830 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2831   // tmp must be unused
2832   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2833   assert(left->is_register(), "can only handle registers");
2834 
2835   if (left->is_single_cpu()) {
2836     __ z_lcr(dest->as_register(), left->as_register());
2837   } else if (left->is_single_fpu()) {
2838     __ z_lcebr(dest->as_float_reg(), left->as_float_reg());
2839   } else if (left->is_double_fpu()) {
2840     __ z_lcdbr(dest->as_double_reg(), left->as_double_reg());
2841   } else {
2842     assert(left->is_double_cpu(), "Must be a long");
2843     __ z_lcgr(dest->as_register_lo(), left->as_register_lo());
2844   }
2845 }
2846 
2847 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
2848                             const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2849   assert(!tmp->is_valid(), "don't need temporary");
2850   emit_call_c(dest);
2851   CHECK_BAILOUT();
2852   if (info != nullptr) {
2853     add_call_info_here(info);
2854   }
2855 }
2856 
2857 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2858   ShouldNotCallThis(); // not needed on ZARCH_64
2859 }
2860 
2861 void LIR_Assembler::membar() {
2862   __ z_fence();
2863 }
2864 
2865 void LIR_Assembler::membar_acquire() {
2866   __ z_acquire();
2867 }
2868 
2869 void LIR_Assembler::membar_release() {
2870   __ z_release();
2871 }
2872 
2873 void LIR_Assembler::membar_loadload() {
2874   __ z_acquire();
2875 }
2876 
2877 void LIR_Assembler::membar_storestore() {
2878   __ z_release();
2879 }
2880 
2881 void LIR_Assembler::membar_loadstore() {
2882   __ z_acquire();
2883 }
2884 
2885 void LIR_Assembler::membar_storeload() {
2886   __ z_fence();
2887 }
2888 
2889 void LIR_Assembler::on_spin_wait() {
2890   Unimplemented();
2891 }
2892 
2893 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2894   assert(patch_code == lir_patch_none, "Patch code not supported");
2895   LIR_Address* addr = addr_opr->as_address_ptr();
2896   assert(addr->scale() == LIR_Address::times_1, "scaling unsupported");
2897   __ load_address(dest->as_pointer_register(), as_Address(addr));
2898 }
2899 
2900 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2901   ShouldNotCallThis(); // unused
2902 }
2903 
2904 #ifdef ASSERT
2905 // Emit run-time assertion.
2906 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2907   Unimplemented();
2908 }
2909 #endif
2910 
2911 void LIR_Assembler::peephole(LIR_List*) {
2912   // Do nothing for now.
2913 }
2914 
2915 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
2916   assert(code == lir_xadd, "lir_xchg not supported");
2917   Address src_addr = as_Address(src->as_address_ptr());
2918   Register base = src_addr.base();
2919   intptr_t disp = src_addr.disp();
2920   if (src_addr.index()->is_valid()) {
2921     // LAA and LAAG do not support index register.
2922     __ load_address(Z_R1_scratch, src_addr);
2923     base = Z_R1_scratch;
2924     disp = 0;
2925   }
2926   if (data->type() == T_INT) {
2927     __ z_laa(dest->as_register(), data->as_register(), disp, base);
2928   } else if (data->type() == T_LONG) {
2929     assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
2930     __ z_laag(dest->as_register_lo(), data->as_register_lo(), disp, base);
2931   } else {
2932     ShouldNotReachHere();
2933   }
2934 }
2935 
2936 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2937   Register obj = op->obj()->as_register();
2938   Register tmp1 = op->tmp()->as_pointer_register();
2939   Register tmp2 = Z_R1_scratch;
2940   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2941   ciKlass* exact_klass = op->exact_klass();
2942   intptr_t current_klass = op->current_klass();
2943   bool not_null = op->not_null();
2944   bool no_conflict = op->no_conflict();
2945 
2946   Label update, next, none, null_seen, init_klass;
2947 
2948   bool do_null = !not_null;
2949   bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
2950   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
2951 
2952   assert(do_null || do_update, "why are we here?");
2953   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
2954 
2955   __ verify_oop(obj, FILE_AND_LINE);
2956 
2957   if (do_null || tmp1 != obj DEBUG_ONLY(|| true)) {
2958     __ z_ltgr(tmp1, obj);
2959   }
2960   if (do_null) {
2961     __ z_brnz(update);
2962     if (!TypeEntries::was_null_seen(current_klass)) {
2963       __ z_lg(tmp1, mdo_addr);
2964       __ z_oill(tmp1, TypeEntries::null_seen);
2965       __ z_stg(tmp1, mdo_addr);
2966     }
2967     if (do_update) {
2968       __ z_bru(next);
2969     }
2970   } else {
2971     __ asm_assert(Assembler::bcondNotZero, "unexpected null obj", __LINE__);
2972   }
2973 
2974   __ bind(update);
2975 
2976   if (do_update) {
2977 #ifdef ASSERT
2978     if (exact_klass != nullptr) {
2979       __ load_klass(tmp1, tmp1);
2980       metadata2reg(exact_klass->constant_encoding(), tmp2);
2981       __ z_cgr(tmp1, tmp2);
2982       __ asm_assert(Assembler::bcondEqual, "exact klass and actual klass differ", __LINE__);
2983     }
2984 #endif
2985 
2986     Label do_update;
2987     __ z_lg(tmp2, mdo_addr);
2988 
2989     if (!no_conflict) {
2990       if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
2991         if (exact_klass != nullptr) {
2992           metadata2reg(exact_klass->constant_encoding(), tmp1);
2993         } else {
2994           __ load_klass(tmp1, tmp1);
2995         }
2996 
2997         // Klass seen before: nothing to do (regardless of unknown bit).
2998         __ z_lgr(Z_R0_scratch, tmp2);
2999         assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction");
3000         __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF);
3001         __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next);
3002 
3003         // Already unknown: Nothing to do anymore.
3004         __ z_tmll(tmp2, TypeEntries::type_unknown);
3005         __ z_brc(Assembler::bcondAllOne, next);
3006 
3007         if (TypeEntries::is_type_none(current_klass)) {
3008           __ z_lgr(Z_R0_scratch, tmp2);
3009           assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction");
3010           __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF);
3011           __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, init_klass);
3012         }
3013       } else {
3014         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
3015                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
3016 
3017         // Already unknown: Nothing to do anymore.
3018         __ z_tmll(tmp2, TypeEntries::type_unknown);
3019         __ z_brc(Assembler::bcondAllOne, next);
3020       }
3021 
3022       // Different than before. Cannot keep accurate profile.
3023       __ z_oill(tmp2, TypeEntries::type_unknown);
3024       __ z_bru(do_update);
3025     } else {
3026       // There's a single possible klass at this profile point.
3027       assert(exact_klass != nullptr, "should be");
3028       if (TypeEntries::is_type_none(current_klass)) {
3029         metadata2reg(exact_klass->constant_encoding(), tmp1);
3030         __ z_lgr(Z_R0_scratch, tmp2);
3031         assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction");
3032         __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF);
3033         __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next);
3034 #ifdef ASSERT
3035         {
3036           Label ok;
3037           __ z_lgr(Z_R0_scratch, tmp2);
3038           assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction");
3039           __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF);
3040           __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, ok);
3041           __ stop("unexpected profiling mismatch");
3042           __ bind(ok);
3043         }
3044 #endif
3045 
3046       } else {
3047         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
3048                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3049 
3050         // Already unknown: Nothing to do anymore.
3051         __ z_tmll(tmp2, TypeEntries::type_unknown);
3052         __ z_brc(Assembler::bcondAllOne, next);
3053         __ z_oill(tmp2, TypeEntries::type_unknown);
3054         __ z_bru(do_update);
3055       }
3056     }
3057 
3058     __ bind(init_klass);
3059     // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
3060     __ z_ogr(tmp2, tmp1);
3061 
3062     __ bind(do_update);
3063     __ z_stg(tmp2, mdo_addr);
3064 
3065     __ bind(next);
3066   }
3067 }
3068 
3069 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
3070   assert(op->crc()->is_single_cpu(), "crc must be register");
3071   assert(op->val()->is_single_cpu(), "byte value must be register");
3072   assert(op->result_opr()->is_single_cpu(), "result must be register");
3073   Register crc = op->crc()->as_register();
3074   Register val = op->val()->as_register();
3075   Register res = op->result_opr()->as_register();
3076 
3077   assert_different_registers(val, crc, res);
3078 
3079   __ load_const_optimized(res, StubRoutines::crc_table_addr());
3080   __ kernel_crc32_singleByteReg(crc, val, res, true);
3081   __ z_lgfr(res, crc);
3082 }
3083 
3084 #undef __