1 /*
   2  * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/codeBuffer.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/barrierSetAssembler.hpp"
  32 #include "gc/shared/collectedHeap.inline.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "gc/shared/cardTableBarrierSet.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.hpp"
  37 #include "oops/accessDecorators.hpp"
  38 #include "oops/compressedKlass.inline.hpp"
  39 #include "oops/compressedOops.inline.hpp"
  40 #include "oops/klass.inline.hpp"
  41 #include "prims/methodHandles.hpp"
  42 #include "registerSaver_s390.hpp"
  43 #include "runtime/icache.hpp"
  44 #include "runtime/interfaceSupport.inline.hpp"
  45 #include "runtime/objectMonitor.hpp"
  46 #include "runtime/os.hpp"
  47 #include "runtime/safepoint.hpp"
  48 #include "runtime/safepointMechanism.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 #include "runtime/stubRoutines.hpp"
  51 #include "utilities/events.hpp"
  52 #include "utilities/macros.hpp"
  53 #include "utilities/powerOfTwo.hpp"
  54 
  55 #include <ucontext.h>
  56 
  57 #define BLOCK_COMMENT(str) block_comment(str)
  58 #define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")
  59 
  60 // Move 32-bit register if destination and source are different.
  61 void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  62   if (rs != rd) { z_lr(rd, rs); }
  63 }
  64 
  65 // Move register if destination and source are different.
  66 void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  67   if (rs != rd) { z_lgr(rd, rs); }
  68 }
  69 
  70 // Zero-extend 32-bit register into 64-bit register if destination and source are different.
  71 void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  72   if (rs != rd) { z_llgfr(rd, rs); }
  73 }
  74 
  75 // Move float register if destination and source are different.
  76 void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  77   if (rs != rd) { z_ldr(rd, rs); }
  78 }
  79 
  80 // Move integer register if destination and source are different.
  81 // It is assumed that shorter-than-int types are already
  82 // appropriately sign-extended.
  83 void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
  84                                         BasicType src_type) {
  85   assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  86   assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");
  87 
  88   if (dst_type == src_type) {
  89     lgr_if_needed(dst, src); // Just move all 64 bits.
  90     return;
  91   }
  92 
  93   switch (dst_type) {
  94     // Do not support these types for now.
  95     //  case T_BOOLEAN:
  96     case T_BYTE:  // signed byte
  97       switch (src_type) {
  98         case T_INT:
  99           z_lgbr(dst, src);
 100           break;
 101         default:
 102           ShouldNotReachHere();
 103       }
 104       return;
 105 
 106     case T_CHAR:
 107     case T_SHORT:
 108       switch (src_type) {
 109         case T_INT:
 110           if (dst_type == T_CHAR) {
 111             z_llghr(dst, src);
 112           } else {
 113             z_lghr(dst, src);
 114           }
 115           break;
 116         default:
 117           ShouldNotReachHere();
 118       }
 119       return;
 120 
 121     case T_INT:
 122       switch (src_type) {
 123         case T_BOOLEAN:
 124         case T_BYTE:
 125         case T_CHAR:
 126         case T_SHORT:
 127         case T_INT:
 128         case T_LONG:
 129         case T_OBJECT:
 130         case T_ARRAY:
 131         case T_VOID:
 132         case T_ADDRESS:
 133           lr_if_needed(dst, src);
 134           // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
 135           return;
 136 
 137         default:
 138           assert(false, "non-integer src type");
 139           return;
 140       }
 141     case T_LONG:
 142       switch (src_type) {
 143         case T_BOOLEAN:
 144         case T_BYTE:
 145         case T_CHAR:
 146         case T_SHORT:
 147         case T_INT:
 148           z_lgfr(dst, src); // sign extension
 149           return;
 150 
 151         case T_LONG:
 152         case T_OBJECT:
 153         case T_ARRAY:
 154         case T_VOID:
 155         case T_ADDRESS:
 156           lgr_if_needed(dst, src);
 157           return;
 158 
 159         default:
 160           assert(false, "non-integer src type");
 161           return;
 162       }
 163       return;
 164     case T_OBJECT:
 165     case T_ARRAY:
 166     case T_VOID:
 167     case T_ADDRESS:
 168       switch (src_type) {
 169         // These types don't make sense to be converted to pointers:
 170         //      case T_BOOLEAN:
 171         //      case T_BYTE:
 172         //      case T_CHAR:
 173         //      case T_SHORT:
 174 
 175         case T_INT:
 176           z_llgfr(dst, src); // zero extension
 177           return;
 178 
 179         case T_LONG:
 180         case T_OBJECT:
 181         case T_ARRAY:
 182         case T_VOID:
 183         case T_ADDRESS:
 184           lgr_if_needed(dst, src);
 185           return;
 186 
 187         default:
 188           assert(false, "non-integer src type");
 189           return;
 190       }
 191       return;
 192     default:
 193       assert(false, "non-integer dst type");
 194       return;
 195   }
 196 }
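// Illustrative examples for move_reg_if_needed() above (documentation only, not emitted here):
//   move_reg_if_needed(r, T_LONG, s, T_INT)  emits LGFR r,s   (sign-extend 32 -> 64 bit)
//   move_reg_if_needed(r, T_INT,  s, T_LONG) copies the low 32 bits via LR r,s (if r != s)
//   move_reg_if_needed(r, T_CHAR, s, T_INT)  emits LLGHR r,s  (zero-extend 16 -> 64 bit)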
 197 
 198 // Move float register if destination and source are different.
 199 void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
 200                                          FloatRegister src, BasicType src_type) {
 201   assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
 202   assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
 203   if (dst_type == src_type) {
 204     ldr_if_needed(dst, src); // Just move all 64 bits.
 205   } else {
 206     switch (dst_type) {
 207       case T_FLOAT:
 208         assert(src_type == T_DOUBLE, "invalid float type combination");
 209         z_ledbr(dst, src);
 210         return;
 211       case T_DOUBLE:
 212         assert(src_type == T_FLOAT, "invalid float type combination");
 213         z_ldebr(dst, src);
 214         return;
 215       default:
 216         assert(false, "non-float dst type");
 217         return;
 218     }
 219   }
 220 }
 221 
 222 // Optimized emitter for reg to mem operations.
 223 // Uses modern instructions if running on modern hardware, classic instructions
 224 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 225 // Data register (reg) cannot be used as work register.
 226 //
 227 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 228 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 229 void MacroAssembler::freg2mem_opt(FloatRegister reg,
 230                                   int64_t       disp,
 231                                   Register      index,
 232                                   Register      base,
 233                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 234                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 235                                   Register      scratch) {
 236   index = (index == noreg) ? Z_R0 : index;
 237   if (Displacement::is_shortDisp(disp)) {
 238     (this->*classic)(reg, disp, index, base);
 239   } else {
 240     if (Displacement::is_validDisp(disp)) {
 241       (this->*modern)(reg, disp, index, base);
 242     } else {
 243       if (scratch != Z_R0 && scratch != Z_R1) {
 244         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 245       } else {
 246         if (scratch != Z_R0) {   // scratch == Z_R1
 247           if ((scratch == index) || (index == base)) {
 248             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 249           } else {
 250             add2reg(scratch, disp, base);
 251             (this->*classic)(reg, 0, index, scratch);
 252             if (base == scratch) {
 253               add2reg(base, -disp);  // Restore base.
 254             }
 255           }
 256         } else {   // scratch == Z_R0
 257           z_lgr(scratch, base);
 258           add2reg(base, disp);
 259           (this->*classic)(reg, 0, index, base);
 260           z_lgr(base, scratch);      // Restore base.
 261         }
 262       }
 263     }
 264   }
 265 }
 266 
 267 void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
 268   if (is_double) {
 269     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
 270   } else {
 271     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
 272   }
 273 }
 274 
 275 // Optimized emitter for mem to reg operations.
 276 // Uses modern instructions if running on modern hardware, classic instructions
 277 // otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
 279 //
 280 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 281 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 282 void MacroAssembler::mem2freg_opt(FloatRegister reg,
 283                                   int64_t       disp,
 284                                   Register      index,
 285                                   Register      base,
 286                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 287                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 288                                   Register      scratch) {
 289   index = (index == noreg) ? Z_R0 : index;
 290   if (Displacement::is_shortDisp(disp)) {
 291     (this->*classic)(reg, disp, index, base);
 292   } else {
 293     if (Displacement::is_validDisp(disp)) {
 294       (this->*modern)(reg, disp, index, base);
 295     } else {
 296       if (scratch != Z_R0 && scratch != Z_R1) {
 297         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 298       } else {
 299         if (scratch != Z_R0) {   // scratch == Z_R1
 300           if ((scratch == index) || (index == base)) {
 301             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 302           } else {
 303             add2reg(scratch, disp, base);
 304             (this->*classic)(reg, 0, index, scratch);
 305             if (base == scratch) {
 306               add2reg(base, -disp);  // Restore base.
 307             }
 308           }
 309         } else {   // scratch == Z_R0
 310           z_lgr(scratch, base);
 311           add2reg(base, disp);
 312           (this->*classic)(reg, 0, index, base);
 313           z_lgr(base, scratch);      // Restore base.
 314         }
 315       }
 316     }
 317   }
 318 }
 319 
 320 void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
 321   if (is_double) {
 322     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
 323   } else {
 324     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
 325   }
 326 }
 327 
 328 // Optimized emitter for reg to mem operations.
 329 // Uses modern instructions if running on modern hardware, classic instructions
 330 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 331 // Data register (reg) cannot be used as work register.
 332 //
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 336 void MacroAssembler::reg2mem_opt(Register reg,
 337                                  int64_t  disp,
 338                                  Register index,
 339                                  Register base,
 340                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 341                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
 342                                  Register scratch) {
 343   index = (index == noreg) ? Z_R0 : index;
 344   if (Displacement::is_shortDisp(disp)) {
 345     (this->*classic)(reg, disp, index, base);
 346   } else {
 347     if (Displacement::is_validDisp(disp)) {
 348       (this->*modern)(reg, disp, index, base);
 349     } else {
 350       if (scratch != Z_R0 && scratch != Z_R1) {
 351         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 352       } else {
 353         if (scratch != Z_R0) {   // scratch == Z_R1
 354           if ((scratch == index) || (index == base)) {
 355             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 356           } else {
 357             add2reg(scratch, disp, base);
 358             (this->*classic)(reg, 0, index, scratch);
 359             if (base == scratch) {
 360               add2reg(base, -disp);  // Restore base.
 361             }
 362           }
 363         } else {   // scratch == Z_R0
 364           if ((scratch == reg) || (scratch == base) || (reg == base)) {
 365             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 366           } else {
 367             z_lgr(scratch, base);
 368             add2reg(base, disp);
 369             (this->*classic)(reg, 0, index, base);
 370             z_lgr(base, scratch);    // Restore base.
 371           }
 372         }
 373       }
 374     }
 375   }
 376 }
 377 
 378 int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
 379   int store_offset = offset();
 380   if (is_double) {
 381     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
 382   } else {
 383     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
 384   }
 385   return store_offset;
 386 }
 387 
 388 // Optimized emitter for mem to reg operations.
 389 // Uses modern instructions if running on modern hardware, classic instructions
 390 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 391 // Data register (reg) will be used as work register where possible.
 392 void MacroAssembler::mem2reg_opt(Register reg,
 393                                  int64_t  disp,
 394                                  Register index,
 395                                  Register base,
 396                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 397                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
 398   index = (index == noreg) ? Z_R0 : index;
 399   if (Displacement::is_shortDisp(disp)) {
 400     (this->*classic)(reg, disp, index, base);
 401   } else {
 402     if (Displacement::is_validDisp(disp)) {
 403       (this->*modern)(reg, disp, index, base);
 404     } else {
 405       if ((reg == index) && (reg == base)) {
 406         z_sllg(reg, reg, 1);
 407         add2reg(reg, disp);
 408         (this->*classic)(reg, 0, noreg, reg);
 409       } else if ((reg == index) && (reg != Z_R0)) {
 410         add2reg(reg, disp);
 411         (this->*classic)(reg, 0, reg, base);
 412       } else if (reg == base) {
 413         add2reg(reg, disp);
 414         (this->*classic)(reg, 0, index, reg);
 415       } else if (reg != Z_R0) {
 416         add2reg(reg, disp, base);
 417         (this->*classic)(reg, 0, index, reg);
 418       } else { // reg == Z_R0 && reg != base here
 419         add2reg(base, disp);
 420         (this->*classic)(reg, 0, index, base);
 421         add2reg(base, -disp);
 422       }
 423     }
 424   }
 425 }
 426 
 427 void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
 428   if (is_double) {
 429     z_lg(reg, a);
 430   } else {
 431     mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
 432   }
 433 }
 434 
 435 void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
 436   mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
 437 }
 438 
 439 void MacroAssembler::and_imm(Register r, long mask,
 440                              Register tmp /* = Z_R0 */,
 441                              bool wide    /* = false */) {
 442   assert(wide || Immediate::is_simm32(mask), "mask value too large");
 443 
 444   if (!wide) {
 445     z_nilf(r, mask);
 446     return;
 447   }
 448 
 449   assert(r != tmp, " need a different temporary register !");
 450   load_const_optimized(tmp, mask);
 451   z_ngr(r, tmp);
 452 }
 453 
 454 // Calculate the 1's complement.
 455 // Note: The condition code is neither preserved nor correctly set by this code!!!
 456 // Note: (wide == false) does not protect the high order half of the target register
 457 //       from alteration. It only serves as optimization hint for 32-bit results.
 458 void MacroAssembler::not_(Register r1, Register r2, bool wide) {
 459 
 460   if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
 461     z_xilf(r1, -1);
 462     if (wide) {
 463       z_xihf(r1, -1);
 464     }
 465   } else { // Distinct src and dst registers.
 466     load_const_optimized(r1, -1);
 467     z_xgr(r1, r2);
 468   }
 469 }
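// Example for not_() above (illustrative): not_(r, noreg, true) emits XILF r,-1 followed by
// XIHF r,-1, flipping all 64 bits of r in place.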
 470 
 471 unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
 472   assert(lBitPos >=  0,      "zero is  leftmost bit position");
 473   assert(rBitPos <= 63,      "63   is rightmost bit position");
 474   assert(lBitPos <= rBitPos, "inverted selection interval");
 475   return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
 476 }
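// Worked examples for create_mask() above (illustrative), using IBM bit numbering (bit 0 = leftmost/MSB):
//   create_mask( 8, 15) == 0x00ff000000000000UL   (selects the second-highest byte)
//   create_mask(48, 63) == 0x000000000000ffffUL   (selects the low halfword)
//   create_mask( 0, 63) == 0xffffffffffffffffUL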
 477 
 478 // Helper function for the "Rotate_then_<logicalOP>" emitters.
 479 // Rotate src, then mask register contents such that only bits in range survive.
 480 // For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
 481 // For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
 482 // The caller must ensure that the selected range only contains bits with defined value.
 483 void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
 484                                       int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
 485   assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
 486   bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
 487   bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
 488   //  Pre-determine which parts of dst will be zero after shift/rotate.
 489   bool llZero  =  sll4rll && (nRotate >= 16);
 490   bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
 491   bool lfZero  = llZero && lhZero;
 492   bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
 493   bool hhZero  =                                 (srl4rll && (nRotate <= -16));
 494   bool hfZero  = hlZero && hhZero;
 495 
 496   // rotate then mask src operand.
 497   // if oneBits == true,  all bits outside selected range are 1s.
 498   // if oneBits == false, all bits outside selected range are 0s.
 499   if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
 500     if (dst32bit) {
 501       z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
 502     } else {
 503       if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 504       else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 505       else              { z_rllg(dst, src,  nRotate); }
 506     }
 507   } else {
 508     if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 509     else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 510     else              { z_rllg(dst, src,  nRotate); }
 511   }
 512 
 513   unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
 514   unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
 515   unsigned int   range_mask_l  = (unsigned int)range_mask;
 516   unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
 517   unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
 518   unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
 519   unsigned short range_mask_ll = (unsigned short)range_mask;
 520   // Works for z9 and newer H/W.
 521   if (oneBits) {
 522     if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
 523     if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
 524   } else {
 525     // All bits outside range become 0s
 526     if (((~range_mask_l) != 0) &&              !lfZero) {
 527       z_nilf(dst, range_mask_l);
 528     }
 529     if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
 530       z_nihf(dst, range_mask_h);
 531     }
 532   }
 533 }
 534 
 535 // Rotate src, then insert selected range from rotated src into dst.
 536 // Clear dst before, if requested.
 537 void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
 538                                         int nRotate, bool clear_dst) {
 539   // This version does not depend on src being zero-extended int2long.
 540   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 541   z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
 542 }
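// Usage sketches for rotate_then_insert() above (hypothetical callers, for illustration):
//   rotate_then_insert(dst, src, 48, 63, 0, true); // dst = src & 0xffff
//   rotate_then_insert(dst, src, 32, 63, 0, true); // dst = src & 0xffffffff (zero-extend 32 -> 64 bit)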
 543 
 544 // Rotate src, then and selected range from rotated src into dst.
 545 // Set condition code only if so requested. Otherwise it is unpredictable.
 546 // See performance note in macroAssembler_s390.hpp for important information.
 547 void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
 548                                      int nRotate, bool test_only) {
 549   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 550   // This version does not depend on src being zero-extended int2long.
 551   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
 553 }
 554 
 555 // Rotate src, then or selected range from rotated src into dst.
 556 // Set condition code only if so requested. Otherwise it is unpredictable.
 557 // See performance note in macroAssembler_s390.hpp for important information.
 558 void MacroAssembler::rotate_then_or(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 559                                     int nRotate, bool test_only) {
 560   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 561   // This version does not depend on src being zero-extended int2long.
 562   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
 564 }
 565 
 566 // Rotate src, then xor selected range from rotated src into dst.
 567 // Set condition code only if so requested. Otherwise it is unpredictable.
 568 // See performance note in macroAssembler_s390.hpp for important information.
 569 void MacroAssembler::rotate_then_xor(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 570                                      int nRotate, bool test_only) {
 571   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
 573   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 574   z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
 575 }
 576 
 577 void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
 578   if (inc.is_register()) {
 579     z_agr(r1, inc.as_register());
 580   } else { // constant
 581     intptr_t imm = inc.as_constant();
 582     add2reg(r1, imm);
 583   }
 584 }
 585 // Helper function to multiply the 64bit contents of a register by a 16bit constant.
 586 // The optimization tries to avoid the mghi instruction, since it uses the FPU for
 587 // calculation and is thus rather slow.
 588 //
 589 // There is no handling for special cases, e.g. cval==0 or cval==1.
 590 //
 591 // Returns len of generated code block.
 592 unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
 593   int block_start = offset();
 594 
 595   bool sign_flip = cval < 0;
 596   cval = sign_flip ? -cval : cval;
 597 
 598   BLOCK_COMMENT("Reg64*Con16 {");
 599 
 600   int bit1 = cval & -cval;
 601   if (bit1 == cval) {
 602     z_sllg(rval, rval, exact_log2(bit1));
 603     if (sign_flip) { z_lcgr(rval, rval); }
 604   } else {
 605     int bit2 = (cval-bit1) & -(cval-bit1);
 606     if ((bit1+bit2) == cval) {
 607       z_sllg(work, rval, exact_log2(bit1));
 608       z_sllg(rval, rval, exact_log2(bit2));
 609       z_agr(rval, work);
 610       if (sign_flip) { z_lcgr(rval, rval); }
 611     } else {
 612       if (sign_flip) { z_mghi(rval, -cval); }
 613       else           { z_mghi(rval,  cval); }
 614     }
 615   }
 616   BLOCK_COMMENT("} Reg64*Con16");
 617 
 618   int block_end = offset();
 619   return block_end - block_start;
 620 }
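// Worked example for mul_reg64_const16() above (illustrative): cval == 10 decomposes into
// bit1 == 2 and bit2 == 8, so the emitter produces
//   SLLG work,rval,1 ; SLLG rval,rval,3 ; AGR rval,work
// instead of MGHI. cval == 7 has more than two bits set and falls back to MGHI rval,7.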
 621 
 622 // Generic operation r1 := r2 + imm.
 623 //
 624 // Should produce the best code for each supported CPU version.
 625 // r2 == noreg yields r1 := r1 + imm
 626 // imm == 0 emits either no instruction or r1 := r2 !
 627 // NOTES: 1) Don't use this function where fixed sized
 628 //           instruction sequences are required!!!
 629 //        2) Don't use this function if condition code
 630 //           setting is required!
 631 //        3) Despite being declared as int64_t, the parameter imm
 632 //           must be a simm_32 value (= signed 32-bit integer).
 633 void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
 634   assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");
 635 
 636   if (r2 == noreg) { r2 = r1; }
 637 
 638   // Handle special case imm == 0.
 639   if (imm == 0) {
 640     lgr_if_needed(r1, r2);
 641     // Nothing else to do.
 642     return;
 643   }
 644 
 645   if (!PreferLAoverADD || (r2 == Z_R0)) {
 646     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 647 
 648     // Can we encode imm in 16 bits signed?
 649     if (Immediate::is_simm16(imm)) {
 650       if (r1 == r2) {
 651         z_aghi(r1, imm);
 652         return;
 653       }
 654       if (distinctOpnds) {
 655         z_aghik(r1, r2, imm);
 656         return;
 657       }
 658       z_lgr(r1, r2);
 659       z_aghi(r1, imm);
 660       return;
 661     }
 662   } else {
 663     // Can we encode imm in 12 bits unsigned?
 664     if (Displacement::is_shortDisp(imm)) {
 665       z_la(r1, imm, r2);
 666       return;
 667     }
 668     // Can we encode imm in 20 bits signed?
 669     if (Displacement::is_validDisp(imm)) {
 670       // Always use LAY instruction, so we don't need the tmp register.
 671       z_lay(r1, imm, r2);
 672       return;
 673     }
 674 
 675   }
 676 
 677   // Can handle it (all possible values) with long immediates.
 678   lgr_if_needed(r1, r2);
 679   z_agfi(r1, imm);
 680 }
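// Illustrative expansions of add2reg() above (assuming PreferLAoverADD is off and the
// distinct-operands facility is available):
//   add2reg(r, 7)          -> AGHI r,7
//   add2reg(r, 7, s)       -> AGHIK r,s,7
//   add2reg(r, 0x12345678) -> (LGR if needed) ; AGFI r,0x12345678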
 681 
 682 // Generic operation r := b + x + d
 683 //
 684 // Addition of several operands with address generation semantics - sort of:
 685 //  - no restriction on the registers. Any register will do for any operand.
 686 //  - x == noreg: operand will be disregarded.
 687 //  - b == noreg: will use (contents of) result reg as operand (r := r + d).
 688 //  - x == Z_R0:  just disregard
 689 //  - b == Z_R0:  use as operand. This is not address generation semantics!!!
 690 //
 691 // The same restrictions as on add2reg() are valid!!!
 692 void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
 693   assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");
 694 
 695   if (x == noreg) { x = Z_R0; }
 696   if (b == noreg) { b = r; }
 697 
 698   // Handle special case x == R0.
 699   if (x == Z_R0) {
 700     // Can simply add the immediate value to the base register.
 701     add2reg(r, d, b);
 702     return;
 703   }
 704 
 705   if (!PreferLAoverADD || (b == Z_R0)) {
 706     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 707     // Handle special case d == 0.
 708     if (d == 0) {
 709       if (b == x)        { z_sllg(r, b, 1); return; }
 710       if (r == x)        { z_agr(r, b);     return; }
 711       if (r == b)        { z_agr(r, x);     return; }
 712       if (distinctOpnds) { z_agrk(r, x, b); return; }
 713       z_lgr(r, b);
 714       z_agr(r, x);
 715     } else {
 716       if (x == b)             { z_sllg(r, x, 1); }
 717       else if (r == x)        { z_agr(r, b); }
 718       else if (r == b)        { z_agr(r, x); }
 719       else if (distinctOpnds) { z_agrk(r, x, b); }
 720       else {
 721         z_lgr(r, b);
 722         z_agr(r, x);
 723       }
 724       add2reg(r, d);
 725     }
 726   } else {
 727     // Can we encode imm in 12 bits unsigned?
 728     if (Displacement::is_shortDisp(d)) {
 729       z_la(r, d, x, b);
 730       return;
 731     }
 732     // Can we encode imm in 20 bits signed?
 733     if (Displacement::is_validDisp(d)) {
 734       z_lay(r, d, x, b);
 735       return;
 736     }
 737     z_la(r, 0, x, b);
 738     add2reg(r, d);
 739   }
 740 }
 741 
 742 // Generic emitter (32bit) for direct memory increment.
 743 // For optimal code, do not specify Z_R0 as temp register.
 744 void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
 745   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 746     z_asi(a, imm);
 747   } else {
 748     z_lgf(tmp, a);
 749     add2reg(tmp, imm);
 750     z_st(tmp, a);
 751   }
 752 }
 753 
 754 void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
 755   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 756     z_agsi(a, imm);
 757   } else {
 758     z_lg(tmp, a);
 759     add2reg(tmp, imm);
 760     z_stg(tmp, a);
 761   }
 762 }
 763 
 764 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
 765   switch (size_in_bytes) {
 766     case  8: z_lg(dst, src); break;
 767     case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
 768     case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
 769     case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
 770     default: ShouldNotReachHere();
 771   }
 772 }
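// Example for load_sized_value() above (illustrative): size_in_bytes == 2 emits LGH when
// is_signed (sign-extend to 64 bit) and LLGH otherwise (zero-extend).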
 773 
 774 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
 775   switch (size_in_bytes) {
 776     case  8: z_stg(src, dst); break;
 777     case  4: z_st(src, dst); break;
 778     case  2: z_sth(src, dst); break;
 779     case  1: z_stc(src, dst); break;
 780     default: ShouldNotReachHere();
 781   }
 782 }
 783 
 784 // Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
 785 // a high-order summand in register tmp.
 786 //
 787 // return value: <  0: No split required, si20 actually has property uimm12.
 788 //               >= 0: Split performed. Use return value as uimm12 displacement and
 789 //                     tmp as index register.
 790 int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
 791   assert(Immediate::is_simm20(si20_offset), "sanity");
 792   int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
 793   int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
 794   assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
 795          !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
 796   assert((lg_off+ll_off) == si20_offset, "offset splitup error");
 797 
 798   Register work = accumulate? Z_R0 : tmp;
 799 
 800   if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
 801     z_lghi(work, ll_off>>12);   // Implicit sign extension.
 802     z_slag(work, work, 12);
 803   } else {                      // Len of code = 0..10.
 804     if (ll_off == 0) { return -1; }
 805     // ll_off has 8 significant bits (at most) plus sign.
 806     if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
 807       z_llilh(work, ll_off >> 16);
 808       if (ll_off < 0) {                  // Sign-extension required.
 809         z_lgfr(work, work);
 810       }
 811     } else {
 812       if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
 813         z_llill(work, ll_off);
 814       } else {                           // Non-zero bits in both halfbytes.
 815         z_lghi(work, ll_off>>12);        // Implicit sign extension.
 816         z_slag(work, work, 12);
 817       }
 818     }
 819   }
 820   if (accumulate) { z_algr(tmp, work); } // len of code += 4
 821   return lg_off;
 822 }
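// Worked example for split_largeoffset() above (illustrative): si20_offset == 0x12345 with
// fixed_codelen == true and accumulate == true emits
//   LGHI Z_R0,0x12 ; SLAG Z_R0,Z_R0,12   (high part 0x12000 in Z_R0)
//   ALGR tmp,Z_R0                        (fold high part into tmp)
// and returns 0x345 for use as uimm12 displacement.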
 823 
 824 void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 825   if (Displacement::is_validDisp(si20)) {
 826     z_ley(t, si20, a);
 827   } else {
 828     // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
 829     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 830     // pool loads).
 831     bool accumulate    = true;
 832     bool fixed_codelen = true;
 833     Register work;
 834 
 835     if (fixed_codelen) {
 836       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 837     } else {
 838       accumulate = (a == tmp);
 839     }
 840     work = tmp;
 841 
 842     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 843     if (disp12 < 0) {
 844       z_le(t, si20, work);
 845     } else {
 846       if (accumulate) {
 847         z_le(t, disp12, work);
 848       } else {
 849         z_le(t, disp12, work, a);
 850       }
 851     }
 852   }
 853 }
 854 
 855 void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 856   if (Displacement::is_validDisp(si20)) {
 857     z_ldy(t, si20, a);
 858   } else {
 859     // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
 860     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 861     // pool loads).
 862     bool accumulate    = true;
 863     bool fixed_codelen = true;
 864     Register work;
 865 
 866     if (fixed_codelen) {
 867       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 868     } else {
 869       accumulate = (a == tmp);
 870     }
 871     work = tmp;
 872 
 873     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 874     if (disp12 < 0) {
 875       z_ld(t, si20, work);
 876     } else {
 877       if (accumulate) {
 878         z_ld(t, disp12, work);
 879       } else {
 880         z_ld(t, disp12, work, a);
 881       }
 882     }
 883   }
 884 }
 885 
 886 // PCrelative TOC access.
 887 // Returns distance (in bytes) from current position to start of consts section.
 888 // Returns 0 (zero) if no consts section exists or if it has size zero.
 889 long MacroAssembler::toc_distance() {
 890   CodeSection* cs = code()->consts();
 891   return (long)((cs != nullptr) ? cs->start()-pc() : 0);
 892 }
 893 
 894 // Implementation on x86/sparc assumes that constant and instruction section are
 895 // adjacent, but this doesn't hold. Two special situations may occur, that we must
 896 // be able to handle:
 897 //   1. const section may be located apart from the inst section.
 898 //   2. const section may be empty
// In both cases, we use the const section's start address to compute the "TOC".
// This seems to occur only temporarily; in the final step we always seem to end up
// with the pc-relative variant.
 902 //
 903 // PC-relative offset could be +/-2**32 -> use long for disp
 904 // Furthermore: makes no sense to have special code for
 905 // adjacent const and inst sections.
 906 void MacroAssembler::load_toc(Register Rtoc) {
 907   // Simply use distance from start of const section (should be patched in the end).
 908   long disp = toc_distance();
 909 
 910   RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
 911   relocate(rspec);
 912   z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
 913 }
 914 
 915 // PCrelative TOC access.
 916 // Load from anywhere pcrelative (with relocation of load instr)
 917 void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
 918   address          pc             = this->pc();
 919   ptrdiff_t        total_distance = dataLocation - pc;
 920   RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);
 921 
 922   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 923   assert(total_distance != 0, "sanity");
 924 
 925   // Some extra safety net.
 926   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 927     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 928   }
 929 
 930   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 931   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 932 }
 933 
 934 
 935 // PCrelative TOC access.
 936 // Load from anywhere pcrelative (with relocation of load instr)
 937 // loaded addr has to be relocated when added to constant pool.
 938 void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
 939   address          pc             = this->pc();
 940   ptrdiff_t        total_distance = addrLocation - pc;
 941   RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);
 942 
 943   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 944 
 945   // Some extra safety net.
 946   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_addr_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 948   }
 949 
 950   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 951   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 952 }
 953 
 954 // Generic operation: load a value from memory and test.
 955 // CondCode indicates the sign (<0, ==0, >0) of the loaded value.
 956 void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
 957   z_lb(dst, a);
 958   z_ltr(dst, dst);
 959 }
 960 
 961 void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
 962   int64_t disp = a.disp20();
 963   if (Displacement::is_shortDisp(disp)) {
 964     z_lh(dst, a);
 965   } else if (Displacement::is_longDisp(disp)) {
 966     z_lhy(dst, a);
 967   } else {
 968     guarantee(false, "displacement out of range");
 969   }
 970   z_ltr(dst, dst);
 971 }
 972 
 973 void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
 974   z_lt(dst, a);
 975 }
 976 
 977 void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
 978   z_ltgf(dst, a);
 979 }
 980 
 981 void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
 982   z_ltg(dst, a);
 983 }
 984 
 985 // Test a bit in memory.
 986 void MacroAssembler::testbit(const Address &a, unsigned int bit) {
 987   assert(a.index() == noreg, "no index reg allowed in testbit");
 988   if (bit <= 7) {
 989     z_tm(a.disp() + 3, a.base(), 1 << bit);
 990   } else if (bit <= 15) {
 991     z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
 992   } else if (bit <= 23) {
 993     z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
 994   } else if (bit <= 31) {
 995     z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
 996   } else {
 997     ShouldNotReachHere();
 998   }
 999 }
1000 
1001 // Test a bit in a register. Result is reflected in CC.
1002 void MacroAssembler::testbit(Register r, unsigned int bitPos) {
1003   if (bitPos < 16) {
1004     z_tmll(r, 1U<<bitPos);
1005   } else if (bitPos < 32) {
1006     z_tmlh(r, 1U<<(bitPos-16));
1007   } else if (bitPos < 48) {
1008     z_tmhl(r, 1U<<(bitPos-32));
1009   } else if (bitPos < 64) {
1010     z_tmhh(r, 1U<<(bitPos-48));
1011   } else {
1012     ShouldNotReachHere();
1013   }
1014 }
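// Examples for testbit(Register, ...) above (illustrative): testbit(r, 0) emits TMLL r,0x0001
// and testbit(r, 63) emits TMHH r,0x8000; in both cases only the condition code is affected.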
1015 
1016 void MacroAssembler::prefetch_read(Address a) {
1017   z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
1018 }
1019 void MacroAssembler::prefetch_update(Address a) {
1020   z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
1021 }
1022 
1023 // Clear a register, i.e. load const zero into reg.
1024 // Return len (in bytes) of generated instruction(s).
1025 // whole_reg: Clear 64 bits if true, 32 bits otherwise.
1026 // set_cc:    Use instruction that sets the condition code, if true.
1027 int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
1028   unsigned int start_off = offset();
1029   if (whole_reg) {
1030     set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
1031   } else {  // Only 32bit register.
1032     set_cc ? z_xr(r, r) : z_lhi(r, 0);
1033   }
1034   return offset() - start_off;
1035 }
1036 
1037 #ifdef ASSERT
1038 int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
1039   switch (pattern_len) {
1040     case 1:
1041       pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
1042     case 2:
1043       pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
1044     case 4:
1045       pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
1046     case 8:
1047       return load_const_optimized_rtn_len(r, pattern, true);
1048       break;
1049     default:
1050       guarantee(false, "preset_reg: bad len");
1051   }
1052   return 0;
1053 }
1054 #endif
1055 
1056 // addr: Address descriptor of memory to clear. Index register will not be used!
1057 // size: Number of bytes to clear.
// Condition code will not be preserved.
//    !!! DO NOT USE THIS FOR ATOMIC MEMORY CLEARING !!!
1060 //    !!! Use store_const() instead                  !!!
1061 void MacroAssembler::clear_mem(const Address& addr, unsigned int size) {
1062   guarantee((addr.disp() + size) <= 4096, "MacroAssembler::clear_mem: size too large");
1063 
1064   switch (size) {
1065     case 0:
1066       return;
1067     case 1:
1068       z_mvi(addr, 0);
1069       return;
1070     case 2:
1071       z_mvhhi(addr, 0);
1072       return;
1073     case 4:
1074       z_mvhi(addr, 0);
1075       return;
1076     case 8:
1077       z_mvghi(addr, 0);
1078       return;
1079     default: ; // Fallthru to xc.
1080   }
1081 
1082   // Caution: the emitter with Address operands does implicitly decrement the length
1083   if (size <= 256) {
1084     z_xc(addr, size, addr);
1085   } else {
1086     unsigned int offset = addr.disp();
1087     unsigned int incr   = 256;
1088     for (unsigned int i = 0; i <= size-incr; i += incr) {
1089       z_xc(offset, incr - 1, addr.base(), offset, addr.base());
1090       offset += incr;
1091     }
1092     unsigned int rest = size - (offset - addr.disp());
    if (rest > 0) {
1094       z_xc(offset, rest-1, addr.base(), offset, addr.base());
1095     }
1096   }
1097 }
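// Worked example for clear_mem() above (illustrative): clear_mem(Address(base, 0), 600) emits
// two XC instructions of 256 bytes each (at offsets 0 and 256) followed by one XC clearing the
// remaining 88 bytes.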
1098 
1099 void MacroAssembler::align(int modulus) {
1100   while (offset() % modulus != 0) z_nop();
1101 }
1102 
// Special version for non-relocatable code if required alignment
1104 // is larger than CodeEntryAlignment.
1105 void MacroAssembler::align_address(int modulus) {
1106   while ((uintptr_t)pc() % modulus != 0) z_nop();
1107 }
1108 
1109 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1110                                          Register temp_reg,
1111                                          int64_t extra_slot_offset) {
1112   // On Z, we can have index and disp in an Address. So don't call argument_offset,
1113   // which issues an unnecessary add instruction.
1114   int stackElementSize = Interpreter::stackElementSize;
1115   int64_t offset = extra_slot_offset * stackElementSize;
1116   const Register argbase = Z_esp;
1117   if (arg_slot.is_constant()) {
1118     offset += arg_slot.as_constant() * stackElementSize;
1119     return Address(argbase, offset);
1120   }
1121   // else
1122   assert(temp_reg != noreg, "must specify");
1123   assert(temp_reg != Z_ARG1, "base and index are conflicting");
1124   z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
1125   return Address(argbase, temp_reg, offset);
1126 }
1127 
1128 
1129 //===================================================================
1130 //===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
1131 //===================================================================
//===            P A T C H A B L E   C O N S T A N T S            ===
1133 //===================================================================
1134 
1135 
1136 //---------------------------------------------------
1137 //  Load (patchable) constant into register
1138 //---------------------------------------------------
1139 
1140 
1141 // Load absolute address (and try to optimize).
1142 //   Note: This method is usable only for position-fixed code,
1143 //         referring to a position-fixed target location.
1144 //         If not so, relocations and patching must be used.
1145 void MacroAssembler::load_absolute_address(Register d, address addr) {
1146   assert(addr != nullptr, "should not happen");
1147   BLOCK_COMMENT("load_absolute_address:");
1148   if (addr == nullptr) {
1149     z_larl(d, pc()); // Dummy emit for size calc.
1150     return;
1151   }
1152 
1153   if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
1154     z_larl(d, addr);
1155     return;
1156   }
1157 
1158   load_const_optimized(d, (long)addr);
1159 }
1160 
1161 // Load a 64bit constant.
1162 // Patchable code sequence, but not atomically patchable.
1163 // Make sure to keep code size constant -> no value-dependent optimizations.
1164 // Do not kill condition code.
1165 void MacroAssembler::load_const(Register t, long x) {
1166   // Note: Right shift is only cleanly defined for unsigned types
1167   //       or for signed types with nonnegative values.
1168   Assembler::z_iihf(t, (long)((unsigned long)x >> 32));
1169   Assembler::z_iilf(t, (long)((unsigned long)x & 0xffffffffUL));
1170 }
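// Example for load_const() above (illustrative): load_const(t, 0x123456789abcdef0L) emits
//   IIHF t,0x12345678 ; IILF t,0x9abcdef0
// which is always 12 bytes long and can therefore be patched at a fixed offset (see patch_const()).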
1171 
1172 // Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
1173 // Patchable code sequence, but not atomically patchable.
1174 // Make sure to keep code size constant -> no value-dependent optimizations.
1175 // Do not kill condition code.
1176 void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
1177   if (sign_extend) { Assembler::z_lgfi(t, x); }
1178   else             { Assembler::z_llilf(t, x); }
1179 }
1180 
1181 // Load narrow oop constant, no decompression.
1182 void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
1183   assert(UseCompressedOops, "must be on to call this method");
1184   load_const_32to64(t, CompressedOops::narrow_oop_value(a), false /*sign_extend*/);
1185 }
1186 
1187 // Load narrow klass constant, compression required.
1188 void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
1189   assert(UseCompressedClassPointers, "must be on to call this method");
1190   narrowKlass encoded_k = CompressedKlassPointers::encode(k);
1191   load_const_32to64(t, encoded_k, false /*sign_extend*/);
1192 }
1193 
1194 //------------------------------------------------------
1195 //  Compare (patchable) constant with register.
1196 //------------------------------------------------------
1197 
1198 // Compare narrow oop in reg with narrow oop constant, no decompression.
1199 void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
1200   assert(UseCompressedOops, "must be on to call this method");
1201 
1202   Assembler::z_clfi(oop1, CompressedOops::narrow_oop_value(oop2));
1203 }
1204 
1205 // Compare narrow oop in reg with narrow oop constant, no decompression.
1206 void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
1207   assert(UseCompressedClassPointers, "must be on to call this method");
1208   narrowKlass encoded_k = CompressedKlassPointers::encode(klass2);
1209 
1210   Assembler::z_clfi(klass1, encoded_k);
1211 }
1212 
1213 //----------------------------------------------------------
1214 //  Check which kind of load_constant we have here.
1215 //----------------------------------------------------------
1216 
1217 // Detection of CPU version dependent load_const sequence.
1218 // The detection is valid only for code sequences generated by load_const,
1219 // not load_const_optimized.
1220 bool MacroAssembler::is_load_const(address a) {
1221   unsigned long inst1, inst2;
1222   unsigned int  len1,  len2;
1223 
1224   len1 = get_instruction(a, &inst1);
1225   len2 = get_instruction(a + len1, &inst2);
1226 
1227   return is_z_iihf(inst1) && is_z_iilf(inst2);
1228 }
1229 
1230 // Detection of CPU version dependent load_const_32to64 sequence.
1231 // Mostly used for narrow oops and narrow Klass pointers.
1232 // The detection is valid only for code sequences generated by load_const_32to64.
1233 bool MacroAssembler::is_load_const_32to64(address pos) {
1234   unsigned long inst1, inst2;
1235   unsigned int len1;
1236 
1237   len1 = get_instruction(pos, &inst1);
1238   return is_z_llilf(inst1);
1239 }
1240 
1241 // Detection of compare_immediate_narrow sequence.
1242 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1243 bool MacroAssembler::is_compare_immediate32(address pos) {
1244   return is_equal(pos, CLFI_ZOPC, RIL_MASK);
1245 }
1246 
1247 // Detection of compare_immediate_narrow sequence.
1248 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1249 bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
1250   return is_compare_immediate32(pos);
}
1252 
1253 // Detection of compare_immediate_narrow sequence.
1254 // The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
1255 bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
1256   return is_compare_immediate32(pos);
1257 }
1258 
1259 //-----------------------------------
1260 //  patch the load_constant
1261 //-----------------------------------
1262 
1263 // CPU-version dependent patching of load_const.
1264 void MacroAssembler::patch_const(address a, long x) {
1265   assert(is_load_const(a), "not a load of a constant");
1266   // Note: Right shift is only cleanly defined for unsigned types
1267   //       or for signed types with nonnegative values.
1268   set_imm32((address)a, (long)((unsigned long)x >> 32));
1269   set_imm32((address)(a + 6), (long)((unsigned long)x & 0xffffffffUL));
1270 }
1271 
1272 // Patching the value of CPU version dependent load_const_32to64 sequence.
1273 // The passed ptr MUST be in compressed format!
1274 int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
1275   assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");
1276 
1277   set_imm32(pos, np);
1278   return 6;
1279 }
1280 
1281 // Patching the value of CPU version dependent compare_immediate_narrow sequence.
1282 // The passed ptr MUST be in compressed format!
1283 int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
1284   assert(is_compare_immediate32(pos), "not a compressed ptr compare");
1285 
1286   set_imm32(pos, np);
1287   return 6;
1288 }
1289 
1290 // Patching the immediate value of CPU version dependent load_narrow_oop sequence.
1291 // The passed ptr must NOT be in compressed format!
1292 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
1293   assert(UseCompressedOops, "Can only patch compressed oops");
1294   return patch_load_const_32to64(pos, CompressedOops::narrow_oop_value(o));
1295 }
1296 
1297 // Patching the immediate value of CPU version dependent load_narrow_klass sequence.
1298 // The passed ptr must NOT be in compressed format!
1299 int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
1300   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1301 
1302   narrowKlass nk = CompressedKlassPointers::encode(k);
1303   return patch_load_const_32to64(pos, nk);
1304 }
1305 
1306 // Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
1307 // The passed ptr must NOT be in compressed format!
1308 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
1309   assert(UseCompressedOops, "Can only patch compressed oops");
1310   return patch_compare_immediate_32(pos, CompressedOops::narrow_oop_value(o));
1311 }
1312 
1313 // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
1314 // The passed ptr must NOT be in compressed format!
1315 int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
1316   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1317 
1318   narrowKlass nk = CompressedKlassPointers::encode(k);
1319   return patch_compare_immediate_32(pos, nk);
1320 }
1321 
1322 //------------------------------------------------------------------------
1323 //  Extract the constant from a load_constant instruction stream.
1324 //------------------------------------------------------------------------
1325 
1326 // Get constant from a load_const sequence.
1327 long MacroAssembler::get_const(address a) {
1328   assert(is_load_const(a), "not a load of a constant");
1329   unsigned long x;
1330   x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
1331   x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
1332   return (long) x;
1333 }
1334 
1335 //--------------------------------------
1336 //  Store a constant in memory.
1337 //--------------------------------------
1338 
1339 // General emitter to move a constant to memory.
1340 // The store is atomic.
1341 //  o Address must be given in RS format (no index register)
1342 //  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
1343 //  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
1344 //  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
1345 //  o Memory slot must be at least as wide as constant, will assert otherwise.
1346 //  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
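//
// Illustrative usage (hypothetical call, not taken from an actual call site):
//   store_const(Address(Z_SP, 16), 0x1234, 8, 2, Z_R1);
// stores the 16-bit constant, sign-extended, into the 8-byte slot at SP+16. With a short
// displacement and a simm16 value this folds into a single MVGHI instruction.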
1347 int MacroAssembler::store_const(const Address &dest, long imm,
1348                                 unsigned int lm, unsigned int lc,
1349                                 Register scratch) {
1350   int64_t  disp = dest.disp();
1351   Register base = dest.base();
1352   assert(!dest.has_index(), "not supported");
1353   assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
1354   assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
1355   assert(lm>=lc, "memory slot too small");
1356   assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
1357   assert(Displacement::is_validDisp(disp), "displacement out of range");
1358 
1359   bool is_shortDisp = Displacement::is_shortDisp(disp);
1360   int store_offset = -1;
1361 
1362   // For target len == 1 it's easy.
1363   if (lm == 1) {
1364     store_offset = offset();
1365     if (is_shortDisp) {
1366       z_mvi(disp, base, imm);
1367       return store_offset;
1368     } else {
1369       z_mviy(disp, base, imm);
1370       return store_offset;
1371     }
1372   }
1373 
1374   // All the "good stuff" takes an unsigned displacement.
1375   if (is_shortDisp) {
1376     // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.
1377 
1378     store_offset = offset();
1379     switch (lm) {
1380       case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
1381         z_mvhhi(disp, base, imm);
1382         return store_offset;
1383       case 4:
1384         if (Immediate::is_simm16(imm)) {
1385           z_mvhi(disp, base, imm);
1386           return store_offset;
1387         }
1388         break;
1389       case 8:
1390         if (Immediate::is_simm16(imm)) {
1391           z_mvghi(disp, base, imm);
1392           return store_offset;
1393         }
1394         break;
1395       default:
1396         ShouldNotReachHere();
1397         break;
1398     }
1399   }
1400 
1401   //  Can't optimize, so load value and store it.
  guarantee(scratch != noreg, "need a scratch register here!");
1403   if (imm != 0) {
1404     load_const_optimized(scratch, imm);  // Preserves CC anyway.
1405   } else {
1406     // Leave CC alone!!
1407     (void) clear_reg(scratch, true, false); // Indicate unused result.
1408   }
1409 
1410   store_offset = offset();
1411   if (is_shortDisp) {
1412     switch (lm) {
1413       case 2:
1414         z_sth(scratch, disp, Z_R0, base);
1415         return store_offset;
1416       case 4:
1417         z_st(scratch, disp, Z_R0, base);
1418         return store_offset;
1419       case 8:
1420         z_stg(scratch, disp, Z_R0, base);
1421         return store_offset;
1422       default:
1423         ShouldNotReachHere();
1424         break;
1425     }
1426   } else {
1427     switch (lm) {
1428       case 2:
1429         z_sthy(scratch, disp, Z_R0, base);
1430         return store_offset;
1431       case 4:
1432         z_sty(scratch, disp, Z_R0, base);
1433         return store_offset;
1434       case 8:
1435         z_stg(scratch, disp, Z_R0, base);
1436         return store_offset;
1437       default:
1438         ShouldNotReachHere();
1439         break;
1440     }
1441   }
1442   return -1; // should not reach here
1443 }
1444 
1445 //===================================================================
//===      N O T   P A T C H A B L E   C O N S T A N T S          ===
1447 //===================================================================
1448 
1449 // Load constant x into register t with a fast instruction sequence
1450 // depending on the bits in x. Preserves CC under all circumstances.
1451 int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
1452   if (x == 0) {
1453     int len;
1454     if (emit) {
1455       len = clear_reg(t, true, false);
1456     } else {
1457       len = 4;
1458     }
1459     return len;
1460   }
1461 
1462   if (Immediate::is_simm16(x)) {
1463     if (emit) { z_lghi(t, x); }
1464     return 4;
1465   }
1466 
1467   // 64 bit value: | part1 | part2 | part3 | part4 |
1468   // At least one part is not zero!
1469   // Note: Right shift is only cleanly defined for unsigned types
1470   //       or for signed types with nonnegative values.
1471   int part1 = (int)((unsigned long)x >> 48) & 0x0000ffff;
1472   int part2 = (int)((unsigned long)x >> 32) & 0x0000ffff;
1473   int part3 = (int)((unsigned long)x >> 16) & 0x0000ffff;
1474   int part4 = (int)x & 0x0000ffff;
1475   int part12 = (int)((unsigned long)x >> 32);
1476   int part34 = (int)x;
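  // Worked example (illustrative): x = 0x000000010000ffffL
  //   part1 = 0x0000, part2 = 0x0001, part3 = 0x0000, part4 = 0xffff
  //   -> z_llihl(t, 0x0001); z_iill(t, 0xffff);   // two 4-byte instructions, len = 8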
1477 
1478   // Lower word only (unsigned).
1479   if (part12 == 0) {
1480     if (part3 == 0) {
1481       if (emit) z_llill(t, part4);
1482       return 4;
1483     }
1484     if (part4 == 0) {
1485       if (emit) z_llilh(t, part3);
1486       return 4;
1487     }
1488     if (emit) z_llilf(t, part34);
1489     return 6;
1490   }
1491 
1492   // Upper word only.
1493   if (part34 == 0) {
1494     if (part1 == 0) {
1495       if (emit) z_llihl(t, part2);
1496       return 4;
1497     }
1498     if (part2 == 0) {
1499       if (emit) z_llihh(t, part1);
1500       return 4;
1501     }
1502     if (emit) z_llihf(t, part12);
1503     return 6;
1504   }
1505 
1506   // Lower word only (signed).
1507   if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
1508     if (emit) z_lgfi(t, part34);
1509     return 6;
1510   }
1511 
1512   int len = 0;
1513 
1514   if ((part1 == 0) || (part2 == 0)) {
1515     if (part1 == 0) {
1516       if (emit) z_llihl(t, part2);
1517       len += 4;
1518     } else {
1519       if (emit) z_llihh(t, part1);
1520       len += 4;
1521     }
1522   } else {
1523     if (emit) z_llihf(t, part12);
1524     len += 6;
1525   }
1526 
1527   if ((part3 == 0) || (part4 == 0)) {
1528     if (part3 == 0) {
1529       if (emit) z_iill(t, part4);
1530       len += 4;
1531     } else {
1532       if (emit) z_iilh(t, part3);
1533       len += 4;
1534     }
1535   } else {
1536     if (emit) z_iilf(t, part34);
1537     len += 6;
1538   }
1539   return len;
1540 }
1541 
1542 //=====================================================================
1543 //===     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1544 //=====================================================================
1545 
1546 // Note: In the worst case, one of the scratch registers is destroyed!!!
1547 void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1548   // Right operand is constant.
1549   if (x2.is_constant()) {
1550     jlong value = x2.as_constant();
1551     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
1552     return;
1553   }
1554 
1555   // Right operand is in register.
1556   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
1557 }
1558 
1559 // Note: In the worst case, one of the scratch registers is destroyed!!!
1560 void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1561   // Right operand is constant.
1562   if (x2.is_constant()) {
1563     jlong value = x2.as_constant();
1564     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
1565     return;
1566   }
1567 
1568   // Right operand is in register.
1569   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
1570 }
1571 
1572 // Note: In the worst case, one of the scratch registers is destroyed!!!
1573 void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1574   // Right operand is constant.
1575   if (x2.is_constant()) {
1576     jlong value = x2.as_constant();
1577     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
1578     return;
1579   }
1580 
1581   // Right operand is in register.
1582   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
1583 }
1584 
1585 void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1586   // Right operand is constant.
1587   if (x2.is_constant()) {
1588     jlong value = x2.as_constant();
1589     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
1590     return;
1591   }
1592 
1593   // Right operand is in register.
1594   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
1595 }
1596 
1597 // Generate an optimal branch to the branch target.
1598 // Optimal means that a relative branch (brc or brcl) is used if the
1599 // branch distance is short enough. Loading the target address into a
1600 // register and branching via reg is used as fallback only.
1601 //
1602 // Used registers:
1603 //   Z_R1 - work reg. Holds branch target address.
1604 //          Used in fallback case only.
1605 //
1606 // This version of branch_optimized is good for cases where the target address is known
1607 // and constant, i.e. is never changed (no relocation, no patching).
1608 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
1609   address branch_origin = pc();
1610 
1611   if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1612     z_brc(cond, branch_addr);
1613   } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
1614     z_brcl(cond, branch_addr);
1615   } else {
1616     load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
1617     z_bcr(cond, Z_R1);
1618   }
1619 }
1620 
1621 // This version of branch_optimized is good for cases where the target address
1622 // is potentially not yet known at the time the code is emitted.
1623 //
1624 // One very common case is a branch to an unbound label which is handled here.
1625 // The caller might know (or hope) that the branch distance is short enough
// to be encoded in a 16-bit relative address. In that case, the caller passes a
// NearLabel as branch_target.
1628 // Care must be taken with unbound labels. Each call to target(label) creates
1629 // an entry in the patch queue for that label to patch all references of the label
1630 // once it gets bound. Those recorded patch locations must be patchable. Otherwise,
1631 // an assertion fires at patch time.
1632 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
1633   if (branch_target.is_bound()) {
1634     address branch_addr = target(branch_target);
1635     branch_optimized(cond, branch_addr);
1636   } else if (branch_target.is_near()) {
1637     z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
1638   } else {
1639     z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
1640   }
1641 }
1642 
1643 // Generate an optimal compare and branch to the branch target.
1644 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1645 // branch distance is short enough. Loading the target address into a
1646 // register and branching via reg is used as fallback only.
1647 //
1648 // Input:
1649 //   r1 - left compare operand
1650 //   r2 - right compare operand
1651 void MacroAssembler::compare_and_branch_optimized(Register r1,
1652                                                   Register r2,
1653                                                   Assembler::branch_condition cond,
1654                                                   address  branch_addr,
1655                                                   bool     len64,
1656                                                   bool     has_sign) {
1657   unsigned int casenum = (len64?2:0)+(has_sign?0:1);
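  // casenum: 0 = 32-bit signed, 1 = 32-bit unsigned, 2 = 64-bit signed, 3 = 64-bit unsigned.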
1658 
1659   address branch_origin = pc();
1660   if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1661     switch (casenum) {
1662       case 0: z_crj( r1, r2, cond, branch_addr); break;
      case 1: z_clrj(r1, r2, cond, branch_addr); break;
1664       case 2: z_cgrj(r1, r2, cond, branch_addr); break;
1665       case 3: z_clgrj(r1, r2, cond, branch_addr); break;
1666       default: ShouldNotReachHere(); break;
1667     }
1668   } else {
1669     switch (casenum) {
1670       case 0: z_cr( r1, r2); break;
1671       case 1: z_clr(r1, r2); break;
1672       case 2: z_cgr(r1, r2); break;
1673       case 3: z_clgr(r1, r2); break;
1674       default: ShouldNotReachHere(); break;
1675     }
1676     branch_optimized(cond, branch_addr);
1677   }
1678 }
1679 
1680 // Generate an optimal compare and branch to the branch target.
1681 // Optimal means that a relative branch (clgij, brc or brcl) is used if the
1682 // branch distance is short enough. Loading the target address into a
1683 // register and branching via reg is used as fallback only.
1684 //
1685 // Input:
1686 //   r1 - left compare operand (in register)
1687 //   x2 - right compare operand (immediate)
1688 void MacroAssembler::compare_and_branch_optimized(Register r1,
1689                                                   jlong    x2,
1690                                                   Assembler::branch_condition cond,
1691                                                   Label&   branch_target,
1692                                                   bool     len64,
1693                                                   bool     has_sign) {
1694   address      branch_origin = pc();
1695   bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
1696   bool         is_RelAddr16  = branch_target.is_near() ||
1697                                (branch_target.is_bound() &&
1698                                 RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
1699   unsigned int casenum       = (len64?2:0)+(has_sign?0:1);
1700 
1701   if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
1702     switch (casenum) {
1703       case 0: z_cij( r1, x2, cond, branch_target); break;
1704       case 1: z_clij(r1, x2, cond, branch_target); break;
1705       case 2: z_cgij(r1, x2, cond, branch_target); break;
1706       case 3: z_clgij(r1, x2, cond, branch_target); break;
1707       default: ShouldNotReachHere(); break;
1708     }
1709     return;
1710   }
1711 
1712   if (x2 == 0) {
1713     switch (casenum) {
1714       case 0: z_ltr(r1, r1); break;
1715       case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1716       case 2: z_ltgr(r1, r1); break;
1717       case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1718       default: ShouldNotReachHere(); break;
1719     }
1720   } else {
1721     if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
1722       switch (casenum) {
1723         case 0: z_chi(r1, x2); break;
1724         case 1: z_chi(r1, x2); break; // positive immediate < 2**15
1725         case 2: z_cghi(r1, x2); break;
1726         case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
        default: ShouldNotReachHere(); break;
1728       }
1729     } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
1730       switch (casenum) {
1731         case 0: z_cfi( r1, x2); break;
1732         case 1: z_clfi(r1, x2); break;
1733         case 2: z_cgfi(r1, x2); break;
1734         case 3: z_clgfi(r1, x2); break;
1735         default: ShouldNotReachHere(); break;
1736       }
1737     } else {
1738       // No instruction with immediate operand possible, so load into register.
1739       Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
1740       load_const_optimized(scratch, x2);
1741       switch (casenum) {
1742         case 0: z_cr( r1, scratch); break;
1743         case 1: z_clr(r1, scratch); break;
1744         case 2: z_cgr(r1, scratch); break;
1745         case 3: z_clgr(r1, scratch); break;
1746         default: ShouldNotReachHere(); break;
1747       }
1748     }
1749   }
1750   branch_optimized(cond, branch_target);
1751 }
1752 
1753 // Generate an optimal compare and branch to the branch target.
1754 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1755 // branch distance is short enough. Loading the target address into a
1756 // register and branching via reg is used as fallback only.
1757 //
1758 // Input:
1759 //   r1 - left compare operand
1760 //   r2 - right compare operand
1761 void MacroAssembler::compare_and_branch_optimized(Register r1,
1762                                                   Register r2,
1763                                                   Assembler::branch_condition cond,
1764                                                   Label&   branch_target,
1765                                                   bool     len64,
1766                                                   bool     has_sign) {
1767   unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1768 
1769   if (branch_target.is_bound()) {
1770     address branch_addr = target(branch_target);
1771     compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
1772   } else {
1773     if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
1774       switch (casenum) {
1775         case 0: z_crj(  r1, r2, cond, branch_target); break;
1776         case 1: z_clrj( r1, r2, cond, branch_target); break;
1777         case 2: z_cgrj( r1, r2, cond, branch_target); break;
1778         case 3: z_clgrj(r1, r2, cond, branch_target); break;
1779         default: ShouldNotReachHere(); break;
1780       }
1781     } else {
1782       switch (casenum) {
1783         case 0: z_cr( r1, r2); break;
1784         case 1: z_clr(r1, r2); break;
1785         case 2: z_cgr(r1, r2); break;
1786         case 3: z_clgr(r1, r2); break;
1787         default: ShouldNotReachHere(); break;
1788       }
1789       branch_optimized(cond, branch_target);
1790     }
1791   }
1792 }
1793 
1794 //===========================================================================
1795 //===   END     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1796 //===========================================================================
1797 
1798 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
1799   assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
1800   int index = oop_recorder()->allocate_metadata_index(obj);
1801   RelocationHolder rspec = metadata_Relocation::spec(index);
1802   return AddressLiteral((address)obj, rspec);
1803 }
1804 
1805 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
1806   assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
1807   int index = oop_recorder()->find_index(obj);
1808   RelocationHolder rspec = metadata_Relocation::spec(index);
1809   return AddressLiteral((address)obj, rspec);
1810 }
1811 
1812 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
1813   assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
1814   int oop_index = oop_recorder()->allocate_oop_index(obj);
1815   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1816 }
1817 
1818 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1819   assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
1820   int oop_index = oop_recorder()->find_index(obj);
1821   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1822 }
1823 
1824 // NOTE: destroys r
1825 void MacroAssembler::c2bool(Register r, Register t) {
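  // For any nonzero r, at least one of r and -r has the sign bit set, so the logical
  // right shift of (r | -r) by 31 yields 1; for r == 0 it yields 0.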
1826   z_lcr(t, r);   // t = -r
1827   z_or(r, t);    // r = -r OR r
1828   z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
1829 }
1830 
1831 // Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
1832 // and return the resulting instruction.
// Dest_pos and inst_pos are 32-bit only. These parameters can only designate
// relative positions.
1835 // Use correct argument types. Do not pre-calculate distance.
1836 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
1837   int c = 0;
1838   unsigned long patched_inst = 0;
1839   if (is_call_pcrelative_short(inst) ||
1840       is_branch_pcrelative_short(inst) ||
1841       is_branchoncount_pcrelative_short(inst) ||
1842       is_branchonindex32_pcrelative_short(inst)) {
1843     c = 1;
1844     int m = fmask(15, 0);    // simm16(-1, 16, 32);
1845     int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
1846     patched_inst = (inst & ~m) | v;
1847   } else if (is_compareandbranch_pcrelative_short(inst)) {
1848     c = 2;
1849     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1850     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1851     patched_inst = (inst & ~m) | v;
1852   } else if (is_branchonindex64_pcrelative_short(inst)) {
1853     c = 3;
1854     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1855     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1856     patched_inst = (inst & ~m) | v;
1857   } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
1858     c = 4;
1859     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1860     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1861     patched_inst = (inst & ~m) | v;
1862   } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
1863     c = 5;
1864     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1865     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1866     patched_inst = (inst & ~m) | v;
1867   } else {
1868     print_dbg_msg(tty, inst, "not a relative branch", 0);
1869     dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
1870     ShouldNotReachHere();
1871   }
1872 
1873   long new_off = get_pcrel_offset(patched_inst);
1874   if (new_off != (dest_pos-inst_pos)) {
1875     tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
1876     print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
1877     print_dbg_msg(tty, patched_inst, "<- patched  instruction: branch patching error", 0);
1878 #ifdef LUCY_DBG
1879     VM_Version::z_SIGSEGV();
1880 #endif
1881     ShouldNotReachHere();
1882   }
1883   return patched_inst;
1884 }
1885 
1886 // Only called when binding labels (share/vm/asm/assembler.cpp)
1887 // Pass arguments as intended. Do not pre-calculate distance.
1888 void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
1889   unsigned long stub_inst;
1890   int           inst_len = get_instruction(branch, &stub_inst);
1891 
1892   set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
1893 }
1894 
1895 
1896 // Extract relative address (aka offset).
1897 // inv_simm16 works for 4-byte instructions only.
// Compare-and-branch instructions are 6 bytes long and have a 16-bit offset "in the middle".
1899 long MacroAssembler::get_pcrel_offset(unsigned long inst) {
1900 
1901   if (MacroAssembler::is_pcrelative_short(inst)) {
1902     if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
1903       return RelAddr::inv_pcrel_off16(inv_simm16(inst));
1904     } else {
1905       return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
1906     }
1907   }
1908 
1909   if (MacroAssembler::is_pcrelative_long(inst)) {
1910     return RelAddr::inv_pcrel_off32(inv_simm32(inst));
1911   }
1912 
1913   print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
1914 #ifdef LUCY_DBG
1915   VM_Version::z_SIGSEGV();
1916 #else
1917   ShouldNotReachHere();
1918 #endif
1919   return -1;
1920 }
1921 
1922 long MacroAssembler::get_pcrel_offset(address pc) {
1923   unsigned long inst;
1924   unsigned int  len = get_instruction(pc, &inst);
1925 
1926 #ifdef ASSERT
1927   long offset;
1928   if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
1929     offset = get_pcrel_offset(inst);
1930   } else {
1931     offset = -1;
1932   }
1933 
1934   if (offset == -1) {
1935     dump_code_range(tty, pc, 32, "not a pcrelative instruction");
1936 #ifdef LUCY_DBG
1937     VM_Version::z_SIGSEGV();
1938 #else
1939     ShouldNotReachHere();
1940 #endif
1941   }
1942   return offset;
1943 #else
1944   return get_pcrel_offset(inst);
1945 #endif // ASSERT
1946 }
1947 
1948 // Get target address from pc-relative instructions.
1949 address MacroAssembler::get_target_addr_pcrel(address pc) {
1950   assert(is_pcrelative_long(pc), "not a pcrelative instruction");
1951   return pc + get_pcrel_offset(pc);
1952 }
1953 
1954 // Patch pc relative load address.
1955 void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
1956   unsigned long inst;
1957   // Offset is +/- 2**32 -> use long.
1958   ptrdiff_t distance = con - pc;
1959 
1960   get_instruction(pc, &inst);
1961 
1962   if (is_pcrelative_short(inst)) {
1963     *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc);  // Instructions are at least 2-byte aligned, no test required.
1964 
1965     // Some extra safety net.
1966     if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
1967       print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
1968       dump_code_range(tty, pc, 32, "distance out of range (16bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
1970     }
1971     return;
1972   }
1973 
1974   if (is_pcrelative_long(inst)) {
1975     *(int *)(pc+2)   = RelAddr::pcrel_off32(con, pc);
1976 
    // Some extra safety net.
1978     if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
1979       print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
1980       dump_code_range(tty, pc, 32, "distance out of range (32bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
1982     }
1983     return;
1984   }
1985 
1986   guarantee(false, "not a pcrelative instruction to patch!");
1987 }
1988 
1989 // "Current PC" here means the address just behind the basr instruction.
1990 address MacroAssembler::get_PC(Register result) {
1991   z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
1992   return pc();
1993 }
1994 
1995 // Get current PC + offset.
1996 // Offset given in bytes, must be even!
1997 // "Current PC" here means the address of the larl instruction plus the given offset.
1998 address MacroAssembler::get_PC(Register result, int64_t offset) {
1999   address here = pc();
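  // LARL encodes its relative operand in halfwords (2-byte units), hence the division by 2.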
2000   z_larl(result, offset/2); // Save target instruction address in result.
2001   return here + offset;
2002 }
2003 
2004 void MacroAssembler::instr_size(Register size, Register pc) {
2005   // Extract 2 most significant bits of current instruction.
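  // On z/Architecture, these two bits encode the instruction length:
  // 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes.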
2006   z_llgc(size, Address(pc));
2007   z_srl(size, 6);
2008   // Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6.
2009   z_ahi(size, 3);
2010   z_nill(size, 6);
2011 }
2012 
2013 // Resize_frame with SP(new) = SP(old) - [offset].
2014 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
2015 {
2016   assert_different_registers(offset, fp, Z_SP);
2017   if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
2018 
2019   z_sgr(Z_SP, offset);
2020   z_stg(fp, _z_abi(callers_sp), Z_SP);
2021 }
2022 
2023 // Resize_frame with SP(new) = [newSP] + offset.
2024 //   This emitter is useful if we already have calculated a pointer
2025 //   into the to-be-allocated stack space, e.g. with special alignment properties,
2026 //   but need some additional space, e.g. for spilling.
2027 //   newSP    is the pre-calculated pointer. It must not be modified.
2028 //   fp       holds, or is filled with, the frame pointer.
2029 //   offset   is the additional increment which is added to addr to form the new SP.
2030 //            Note: specify a negative value to reserve more space!
2031 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2032 //                    It does not guarantee that fp contains the frame pointer at the end.
2033 void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
2034   assert_different_registers(newSP, fp, Z_SP);
2035 
2036   if (load_fp) {
2037     z_lg(fp, _z_abi(callers_sp), Z_SP);
2038   }
2039 
2040   add2reg(Z_SP, offset, newSP);
2041   z_stg(fp, _z_abi(callers_sp), Z_SP);
2042 }
2043 
2044 // Resize_frame with SP(new) = [newSP].
2045 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2046 //                    It does not guarantee that fp contains the frame pointer at the end.
2047 void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
2048   assert_different_registers(newSP, fp, Z_SP);
2049 
2050   if (load_fp) {
2051     z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
2052   }
2053 
2054   z_lgr(Z_SP, newSP);
2055   if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
2056     z_stg(fp, _z_abi(callers_sp), newSP);
2057   } else {
2058     z_stg(fp, _z_abi(callers_sp), Z_SP);
2059   }
2060 }
2061 
2062 // Resize_frame with SP(new) = SP(old) + offset.
2063 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
2064   assert_different_registers(fp, Z_SP);
2065 
2066   if (load_fp) {
2067     z_lg(fp, _z_abi(callers_sp), Z_SP);
2068   }
2069   add64(Z_SP, offset);
2070   z_stg(fp, _z_abi(callers_sp), Z_SP);
2071 }
2072 
2073 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
2074 #ifdef ASSERT
2075   assert_different_registers(bytes, old_sp, Z_SP);
2076   if (!copy_sp) {
2077     z_cgr(old_sp, Z_SP);
2078     asm_assert(bcondEqual, "[old_sp]!=[Z_SP]", 0x211);
2079   }
2080 #endif
2081   if (copy_sp) { z_lgr(old_sp, Z_SP); }
2082   if (bytes_with_inverted_sign) {
2083     z_agr(Z_SP, bytes);
2084   } else {
    z_sgr(Z_SP, bytes); // z_sgfr would be sufficient, but probably not faster.
2086   }
2087   z_stg(old_sp, _z_abi(callers_sp), Z_SP);
2088 }
2089 
2090 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
2091   long offset = Assembler::align(bytes, frame::alignment_in_bytes);
2092   assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
2093   assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);
2094 
2095   // We must not write outside the current stack bounds (given by Z_SP).
2096   // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
2097   // We rely on Z_R0 by default to be available as scratch.
2098   z_lgr(scratch, Z_SP);
2099   add2reg(Z_SP, -offset);
2100   z_stg(scratch, _z_abi(callers_sp), Z_SP);
2101 #ifdef ASSERT
2102   // Just make sure nobody uses the value in the default scratch register.
2103   // When another register is used, the caller might rely on it containing the frame pointer.
2104   if (scratch == Z_R0) {
2105     z_iihf(scratch, 0xbaadbabe);
2106     z_iilf(scratch, 0xdeadbeef);
2107   }
2108 #endif
2109   return offset;
2110 }
2111 
2112 // Push a frame of size `bytes' plus abi160 on top.
2113 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
2114   BLOCK_COMMENT("push_frame_abi160 {");
2115   unsigned int res = push_frame(bytes + frame::z_abi_160_size);
2116   BLOCK_COMMENT("} push_frame_abi160");
2117   return res;
2118 }
2119 
2120 // Pop current C frame.
2121 void MacroAssembler::pop_frame() {
2122   BLOCK_COMMENT("pop_frame:");
2123   Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
2124 }
2125 
2126 // Pop current C frame and restore return PC register (Z_R14).
2127 void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
2128   BLOCK_COMMENT("pop_frame_restore_retPC:");
2129   int retPC_offset = _z_common_abi(return_pc) + frame_size_in_bytes;
2130   // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
2131   if (Displacement::is_validDisp(retPC_offset)) {
2132     z_lg(Z_R14, retPC_offset, Z_SP);
2133     add2reg(Z_SP, frame_size_in_bytes);
2134   } else {
2135     add2reg(Z_SP, frame_size_in_bytes);
2136     restore_return_pc();
2137   }
2138 }
2139 
2140 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
2141   if (allow_relocation) {
2142     call_c(entry_point);
2143   } else {
2144     call_c_static(entry_point);
2145   }
2146 }
2147 
2148 void MacroAssembler::call_VM_leaf_base(address entry_point) {
2149   bool allow_relocation = true;
2150   call_VM_leaf_base(entry_point, allow_relocation);
2151 }
2152 
2153 void MacroAssembler::call_VM_base(Register oop_result,
2154                                   Register last_java_sp,
2155                                   address  entry_point,
2156                                   bool     allow_relocation,
2157                                   bool     check_exceptions) { // Defaults to true.
2158   // Allow_relocation indicates, if true, that the generated code shall
2159   // be fit for code relocation or referenced data relocation. In other
2160   // words: all addresses must be considered variable. PC-relative addressing
2161   // is not possible then.
2162   // On the other hand, if (allow_relocation == false), addresses and offsets
2163   // may be considered stable, enabling us to take advantage of some PC-relative
2164   // addressing tweaks. These might improve performance and reduce code size.
2165 
2166   // Determine last_java_sp register.
2167   if (!last_java_sp->is_valid()) {
2168     last_java_sp = Z_SP;  // Load Z_SP as SP.
2169   }
2170 
2171   set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);
2172 
2173   // ARG1 must hold thread address.
2174   z_lgr(Z_ARG1, Z_thread);
2175 
2176   address return_pc = nullptr;
2177   if (allow_relocation) {
2178     return_pc = call_c(entry_point);
2179   } else {
2180     return_pc = call_c_static(entry_point);
2181   }
2182 
2183   reset_last_Java_frame(allow_relocation);
2184 
2185   // C++ interp handles this in the interpreter.
2186   check_and_handle_popframe(Z_thread);
2187   check_and_handle_earlyret(Z_thread);
2188 
2189   // Check for pending exceptions.
2190   if (check_exceptions) {
2191     // Check for pending exceptions (java_thread is set upon return).
2192     load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));
2193 
2194     // This used to conditionally jump to forward_exception however it is
2195     // possible if we relocate that the branch will not reach. So we must jump
2196     // around so we can always reach.
2197 
2198     Label ok;
    z_bre(ok); // bcondEqual is the same as bcondZero.
2200     call_stub(StubRoutines::forward_exception_entry());
2201     bind(ok);
2202   }
2203 
2204   // Get oop result if there is one and reset the value in the thread.
2205   if (oop_result->is_valid()) {
2206     get_vm_result(oop_result);
2207   }
2208 
2209   _last_calls_return_pc = return_pc;  // Wipe out other (error handling) calls.
2210 }
2211 
2212 void MacroAssembler::call_VM_base(Register oop_result,
2213                                   Register last_java_sp,
2214                                   address  entry_point,
2215                                   bool     check_exceptions) { // Defaults to true.
2216   bool allow_relocation = true;
2217   call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
2218 }
2219 
2220 // VM calls without explicit last_java_sp.
2221 
2222 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2223   // Call takes possible detour via InterpreterMacroAssembler.
2224   call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
2225 }
2226 
2227 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
2228   // Z_ARG1 is reserved for the thread.
2229   lgr_if_needed(Z_ARG2, arg_1);
2230   call_VM(oop_result, entry_point, check_exceptions);
2231 }
2232 
2233 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
2234   // Z_ARG1 is reserved for the thread.
2235   assert_different_registers(arg_2, Z_ARG2);
2236   lgr_if_needed(Z_ARG2, arg_1);
2237   lgr_if_needed(Z_ARG3, arg_2);
2238   call_VM(oop_result, entry_point, check_exceptions);
2239 }
2240 
2241 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2242                              Register arg_3, bool check_exceptions) {
2243   // Z_ARG1 is reserved for the thread.
2244   assert_different_registers(arg_3, Z_ARG2, Z_ARG3);
2245   assert_different_registers(arg_2, Z_ARG2);
2246   lgr_if_needed(Z_ARG2, arg_1);
2247   lgr_if_needed(Z_ARG3, arg_2);
2248   lgr_if_needed(Z_ARG4, arg_3);
2249   call_VM(oop_result, entry_point, check_exceptions);
2250 }
2251 
2252 // VM static calls without explicit last_java_sp.
2253 
2254 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
2255   // Call takes possible detour via InterpreterMacroAssembler.
2256   call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
2257 }
2258 
2259 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2260                                     Register arg_3, bool check_exceptions) {
2261   // Z_ARG1 is reserved for the thread.
2262   assert_different_registers(arg_3, Z_ARG2, Z_ARG3);
2263   assert_different_registers(arg_2, Z_ARG2);
2264   lgr_if_needed(Z_ARG2, arg_1);
2265   lgr_if_needed(Z_ARG3, arg_2);
2266   lgr_if_needed(Z_ARG4, arg_3);
2267   call_VM_static(oop_result, entry_point, check_exceptions);
2268 }
2269 
2270 // VM calls with explicit last_java_sp.
2271 
2272 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
2273   // Call takes possible detour via InterpreterMacroAssembler.
2274   call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
2275 }
2276 
2277 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
2278    // Z_ARG1 is reserved for the thread.
2279    lgr_if_needed(Z_ARG2, arg_1);
2280    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2281 }
2282 
2283 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2284                              Register arg_2, bool check_exceptions) {
2285    // Z_ARG1 is reserved for the thread.
2286    assert_different_registers(arg_2, Z_ARG2);
2287    lgr_if_needed(Z_ARG2, arg_1);
2288    lgr_if_needed(Z_ARG3, arg_2);
2289    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2290 }
2291 
2292 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2293                              Register arg_2, Register arg_3, bool check_exceptions) {
2294   // Z_ARG1 is reserved for the thread.
2295   assert_different_registers(arg_3, Z_ARG2, Z_ARG3);
2296   assert_different_registers(arg_2, Z_ARG2);
2297   lgr_if_needed(Z_ARG2, arg_1);
2298   lgr_if_needed(Z_ARG3, arg_2);
2299   lgr_if_needed(Z_ARG4, arg_3);
2300   call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2301 }
2302 
2303 // VM leaf calls.
2304 
2305 void MacroAssembler::call_VM_leaf(address entry_point) {
2306   // Call takes possible detour via InterpreterMacroAssembler.
2307   call_VM_leaf_base(entry_point, true);
2308 }
2309 
2310 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
2311   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2312   call_VM_leaf(entry_point);
2313 }
2314 
2315 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
2316   assert_different_registers(arg_2, Z_ARG1);
2317   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2318   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2319   call_VM_leaf(entry_point);
2320 }
2321 
2322 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2323   assert_different_registers(arg_3, Z_ARG1, Z_ARG2);
2324   assert_different_registers(arg_2, Z_ARG1);
2325   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2326   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2327   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2328   call_VM_leaf(entry_point);
2329 }
2330 
2331 // Static VM leaf calls.
2332 // Really static VM leaf calls are never patched.
2333 
2334 void MacroAssembler::call_VM_leaf_static(address entry_point) {
2335   // Call takes possible detour via InterpreterMacroAssembler.
2336   call_VM_leaf_base(entry_point, false);
2337 }
2338 
2339 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
2340   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2341   call_VM_leaf_static(entry_point);
2342 }
2343 
2344 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
2345   assert_different_registers(arg_2, Z_ARG1);
2346   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2347   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2348   call_VM_leaf_static(entry_point);
2349 }
2350 
2351 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2352   assert_different_registers(arg_3, Z_ARG1, Z_ARG2);
2353   assert_different_registers(arg_2, Z_ARG1);
2354   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2355   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2356   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2357   call_VM_leaf_static(entry_point);
2358 }
2359 
2360 // Don't use detour via call_c(reg).
2361 address MacroAssembler::call_c(address function_entry) {
2362   load_const(Z_R1, function_entry);
2363   return call(Z_R1);
2364 }
2365 
2366 // Variant for really static (non-relocatable) calls which are never patched.
2367 address MacroAssembler::call_c_static(address function_entry) {
2368   load_absolute_address(Z_R1, function_entry);
2369 #if 0 // def ASSERT
2370   // Verify that call site did not move.
2371   load_const_optimized(Z_R0, function_entry);
2372   z_cgr(Z_R1, Z_R0);
2373   z_brc(bcondEqual, 3);
2374   z_illtrap(0xba);
2375 #endif
2376   return call(Z_R1);
2377 }
2378 
2379 address MacroAssembler::call_c_opt(address function_entry) {
2380   bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
2381   _last_calls_return_pc = success ? pc() : nullptr;
2382   return _last_calls_return_pc;
2383 }
2384 
2385 // Identify a call_far_patchable instruction: LARL + LG + BASR
2386 //
2387 //    nop                   ; optionally, if required for alignment
2388 //    lgrl rx,A(TOC entry)  ; PC-relative access into constant pool
2389 //    basr Z_R14,rx         ; end of this instruction must be aligned to a word boundary
2390 //
2391 // Code pattern will eventually get patched into variant2 (see below for detection code).
2392 //
2393 bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
2394   address iaddr = instruction_addr;
2395 
2396   // Check for the actual load instruction.
2397   if (!is_load_const_from_toc(iaddr)) { return false; }
2398   iaddr += load_const_from_toc_size();
2399 
2400   // Check for the call (BASR) instruction, finally.
2401   assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
2402   return is_call_byregister(iaddr);
2403 }
2404 
2405 // Identify a call_far_patchable instruction: BRASL
2406 //
// Code pattern that suits atomic patching:
2408 //    nop                       ; Optionally, if required for alignment.
2409 //    nop    ...                ; Multiple filler nops to compensate for size difference (variant0 is longer).
2410 //    nop                       ; For code pattern detection: Prepend each BRASL with a nop.
2411 //    brasl  Z_R14,<reladdr>    ; End of code must be 4-byte aligned !
2412 bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
2413   const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());
2414 
2415   // Check for correct number of leading nops.
2416   address iaddr;
2417   for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
2418     if (!is_z_nop(iaddr)) { return false; }
2419   }
2420   assert(iaddr == call_addr, "sanity");
2421 
2422   // --> Check for call instruction.
2423   if (is_call_far_pcrelative(call_addr)) {
2424     assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
2425     return true;
2426   }
2427 
2428   return false;
2429 }
2430 
2431 // Emit a NOT mt-safely patchable 64 bit absolute call.
2432 // If toc_offset == -2, then the destination of the call (= target) is emitted
2433 //                      to the constant pool and a runtime_call relocation is added
2434 //                      to the code buffer.
2435 // If toc_offset != -2, target must already be in the constant pool at
2436 //                      _ctableStart+toc_offset (a caller can retrieve toc_offset
2437 //                      from the runtime_call relocation).
2438 // Special handling of emitting to scratch buffer when there is no constant pool.
2439 // Slightly changed code pattern. We emit an additional nop if we would
2440 // not end emitting at a word aligned address. This is to ensure
2441 // an atomically patchable displacement in brasl instructions.
2442 //
2443 // A call_far_patchable comes in different flavors:
2444 //  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
2445 //  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
2446 //  - BRASL                  (relative address of call target coded in instruction)
2447 // All flavors occupy the same amount of space. Length differences are compensated
2448 // by leading nops, such that the instruction sequence always ends at the same
2449 // byte offset. This is required to keep the return offset constant.
2450 // Furthermore, the return address (the end of the instruction sequence) is forced
2451 // to be on a 4-byte boundary. This is required for atomic patching, should we ever
2452 // need to patch the call target of the BRASL flavor.
2453 // RETURN value: false, if no constant pool entry could be allocated, true otherwise.
2454 bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
2455   // Get current pc and ensure word alignment for end of instr sequence.
2456   const address start_pc = pc();
2457   const intptr_t       start_off = offset();
2458   assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
2459   const ptrdiff_t      dist      = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
2460   const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
2461   const bool emit_relative_call  = !emit_target_to_pool &&
2462                                    RelAddr::is_in_range_of_RelAddr32(dist) &&
2463                                    ReoptimizeCallSequences &&
2464                                    !code_section()->scratch_emit();
2465 
2466   if (emit_relative_call) {
2467     // Add padding to get the same size as below.
2468     const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
2469     unsigned int current_padding;
2470     for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
2471     assert(current_padding == padding, "sanity");
2472 
2473     // relative call: len = 2(nop) + 6 (brasl)
2474     // CodeBlob resize cannot occur in this case because
2475     // this call is emitted into pre-existing space.
2476     z_nop(); // Prepend each BRASL with a nop.
2477     z_brasl(Z_R14, target);
2478   } else {
2479     // absolute call: Get address from TOC.
2480     // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
2481     if (emit_target_to_pool) {
2482       // When emitting the call for the first time, we do not need to use
2483       // the pc-relative version. It will be patched anyway, when the code
2484       // buffer is copied.
2485       // Relocation is not needed when !ReoptimizeCallSequences.
2486       relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
2487       AddressLiteral dest(target, rt);
      // Store_oop_in_toc() adds dest to the constant table. As a side effect, this kills
      // inst_mark(). Reset if possible.
2490       bool reset_mark = (inst_mark() == pc());
2491       tocOffset = store_oop_in_toc(dest);
2492       if (reset_mark) { set_inst_mark(); }
2493       if (tocOffset == -1) {
2494         return false; // Couldn't create constant pool entry.
2495       }
2496     }
2497     assert(offset() == start_off, "emit no code before this point!");
2498 
2499     address tocPos = pc() + tocOffset;
2500     if (emit_target_to_pool) {
2501       tocPos = code()->consts()->start() + tocOffset;
2502     }
2503     load_long_pcrelative(Z_R14, tocPos);
2504     z_basr(Z_R14, Z_R14);
2505   }
2506 
2507 #ifdef ASSERT
2508   // Assert that we can identify the emitted call.
2509   assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
2510   assert(offset() == start_off+call_far_patchable_size(), "wrong size");
2511 
2512   if (emit_target_to_pool) {
2513     assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
2514            "wrong encoding of dest address");
2515   }
2516 #endif
2517   return true; // success
2518 }
2519 
2520 // Identify a call_far_patchable instruction.
2521 // For more detailed information see header comment of call_far_patchable.
2522 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
2523   return is_call_far_patchable_variant2_at(instruction_addr)  || // short version: BRASL
2524          is_call_far_patchable_variant0_at(instruction_addr);    // long version LARL + LG + BASR
2525 }
2526 
2527 // Does the call_far_patchable instruction use a pc-relative encoding
2528 // of the call destination?
2529 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
2530   // Variant 2 is pc-relative.
2531   return is_call_far_patchable_variant2_at(instruction_addr);
2532 }
2533 
2534 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
2535   // Prepend each BRASL with a nop.
2536   return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size());  // Match at position after one nop required.
2537 }
2538 
2539 // Set destination address of a call_far_patchable instruction.
2540 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
2541   ResourceMark rm;
2542 
2543   // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
2544   int code_size = MacroAssembler::call_far_patchable_size();
2545   CodeBuffer buf(instruction_addr, code_size);
2546   MacroAssembler masm(&buf);
2547   masm.call_far_patchable(dest, tocOffset);
2548   ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
2549 }
2550 
2551 // Get dest address of a call_far_patchable instruction.
2552 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
2553   // Dynamic TOC: absolute address in constant pool.
2554   // Check variant2 first, it is more frequent.
2555 
2556   // Relative address encoded in call instruction.
2557   if (is_call_far_patchable_variant2_at(instruction_addr)) {
2558     return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.
2559 
2560   // Absolute address in constant pool.
2561   } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
2562     address iaddr = instruction_addr;
2563 
2564     long    tocOffset = get_load_const_from_toc_offset(iaddr);
2565     address tocLoc    = iaddr + tocOffset;
2566     return *(address *)(tocLoc);
2567   } else {
2568     fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
2569     fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
2570             *(unsigned long*)instruction_addr,
2571             *(unsigned long*)(instruction_addr+8),
2572             call_far_patchable_size());
2573     Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
2574     ShouldNotReachHere();
2575     return nullptr;
2576   }
2577 }
2578 
2579 void MacroAssembler::align_call_far_patchable(address pc) {
2580   if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
2581 }
2582 
2583 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2584 }
2585 
2586 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2587 }
2588 
2589 // Read from the polling page.
2590 // Use TM or TMY instruction, depending on read offset.
2591 //   offset = 0: Use TM, safepoint polling.
2592 //   offset < 0: Use TMY, profiling safepoint polling.
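// TM (SI format) accepts only a 12-bit unsigned displacement; TMY (SIY format) accepts
// a 20-bit signed displacement and thus covers the negative profiling offsets.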
2593 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
2594   if (Immediate::is_uimm12(offset)) {
2595     z_tm(offset, polling_page_address, mask_safepoint);
2596   } else {
2597     z_tmy(offset, polling_page_address, mask_profiling);
2598   }
2599 }
2600 
2601 // Check whether z_instruction is a read access to the polling page
2602 // which was emitted by load_from_polling_page(..).
2603 bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
2604   unsigned long z_instruction;
2605   unsigned int  ilen = get_instruction(instr_loc, &z_instruction);
2606 
2607   if (ilen == 2) { return false; } // It's none of the allowed instructions.
2608 
2609   if (ilen == 4) {
2610     if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.
2611 
2612     int ms = inv_mask(z_instruction,8,32);  // mask
2613     int ra = inv_reg(z_instruction,16,32);  // base register
2614     int ds = inv_uimm12(z_instruction);     // displacement
2615 
2616     if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
2617       return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
2618     }
2619 
2620   } else { /* if (ilen == 6) */
2621 
2622     assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");
2623 
2624     if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.
2625 
2626     int ms = inv_mask(z_instruction,8,48);  // mask
2627     int ra = inv_reg(z_instruction,16,48);  // base register
2628     int ds = inv_simm20(z_instruction);     // displacement
2629   }
2630 
2631   return true;
2632 }
2633 
2634 // Extract poll address from instruction and ucontext.
2635 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
2636   assert(ucontext != nullptr, "must have ucontext");
2637   ucontext_t* uc = (ucontext_t*) ucontext;
2638   unsigned long z_instruction;
2639   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2640 
2641   if (ilen == 4 && is_z_tm(z_instruction)) {
2642     int ra = inv_reg(z_instruction, 16, 32);  // base register
2643     int ds = inv_uimm12(z_instruction);       // displacement
2644     address addr = (address)uc->uc_mcontext.gregs[ra];
2645     return addr + ds;
2646   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2647     int ra = inv_reg(z_instruction, 16, 48);  // base register
2648     int ds = inv_simm20(z_instruction);       // displacement
2649     address addr = (address)uc->uc_mcontext.gregs[ra];
2650     return addr + ds;
2651   }
2652 
2653   ShouldNotReachHere();
2654   return nullptr;
2655 }
2656 
2657 // Extract poll register from instruction.
2658 uint MacroAssembler::get_poll_register(address instr_loc) {
2659   unsigned long z_instruction;
2660   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2661 
2662   if (ilen == 4 && is_z_tm(z_instruction)) {
2663     return (uint)inv_reg(z_instruction, 16, 32);  // base register
2664   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2665     return (uint)inv_reg(z_instruction, 16, 48);  // base register
2666   }
2667 
2668   ShouldNotReachHere();
2669   return 0;
2670 }
2671 
2672 void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
2673   const Address poll_byte_addr(Z_thread, in_bytes(JavaThread::polling_word_offset()) + 7 /* Big Endian */);
2674   // Armed page has poll_bit set.
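       // polling_word is a 64-bit field; on big-endian s390 its least significant byte
       // is at offset +7, so testing that single byte with TM is sufficient, assuming
       // SafepointMechanism::poll_bit() fits within the low byte.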
2675   z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
2676   z_brnaz(slow_path);
2677 }
2678 
2679 // Don't rely on register locking, always use Z_R1 as scratch register instead.
2680 void MacroAssembler::bang_stack_with_offset(int offset) {
2681   // Stack grows down, caller passes positive offset.
2682   assert(offset > 0, "must bang with positive offset");
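       // TMY's signed 20-bit displacement reaches down to roughly -512 KiB; larger bang
       // offsets (very large frames) fall back to computing the address in Z_R1 first.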
2683   if (Displacement::is_validDisp(-offset)) {
2684     z_tmy(-offset, Z_SP, mask_stackbang);
2685   } else {
2686     add2reg(Z_R1, -offset, Z_SP);    // Do not destroy Z_SP!!!
2687     z_tm(0, Z_R1, mask_stackbang);  // Just banging.
2688   }
2689 }
2690 
2691 void MacroAssembler::reserved_stack_check(Register return_pc) {
2692   // Test if reserved zone needs to be enabled.
2693   Label no_reserved_zone_enabling;
2694   assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2695   BLOCK_COMMENT("reserved_stack_check {");
2696 
2697   z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2698   z_brl(no_reserved_zone_enabling);
2699 
2700   // Enable reserved zone again, throw stack overflow exception.
2701   save_return_pc();
2702   push_frame_abi160(0);
2703   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
2704   pop_frame();
2705   restore_return_pc();
2706 
2707   load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
2708   // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
2709   z_br(Z_R1);
2710 
2711   should_not_reach_here();
2712 
2713   bind(no_reserved_zone_enabling);
2714   BLOCK_COMMENT("} reserved_stack_check");
2715 }
2716 
2717 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
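     // The generated sequence is the usual TLAB bump-pointer allocation; as a rough
     // pseudo-C sketch (names as used below):
     //   obj = thread->tlab_top;
     //   end = obj + size;                            // size: constant or register
     //   if (end > thread->tlab_end) goto slow_case;  // CG + branch on high
     //   thread->tlab_top = end;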
2718 void MacroAssembler::tlab_allocate(Register obj,
2719                                    Register var_size_in_bytes,
2720                                    int con_size_in_bytes,
2721                                    Register t1,
2722                                    Label& slow_case) {
2723   assert_different_registers(obj, var_size_in_bytes, t1);
2724   Register end = t1;
2725   Register thread = Z_thread;
2726 
2727   z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
2728   if (var_size_in_bytes == noreg) {
2729     z_lay(end, Address(obj, con_size_in_bytes));
2730   } else {
2731     z_lay(end, Address(obj, var_size_in_bytes));
2732   }
2733   z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
2734   branch_optimized(bcondHigh, slow_case);
2735 
2736   // Update the tlab top pointer.
2737   z_stg(end, Address(thread, JavaThread::tlab_top_offset()));
2738 
2739   // Recover var_size_in_bytes if necessary.
2740   if (var_size_in_bytes == end) {
2741     z_sgr(var_size_in_bytes, obj);
2742   }
2743 }
2744 
2745 // Emitter for interface method lookup.
2746 //   input: recv_klass, intf_klass, itable_index
2747 //   output: method_result
2748 //   kills: itable_index, temp1_reg, Z_R0, Z_R1
2749 // TODO: temp2_reg is unused. We may also use this emitter in the itable stubs;
2750 //       if the register is still not needed there, remove it.
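     // Rough sketch of the data structure being walked (offsets are symbolic, not literal):
     //   recv_klass + vtable_start_offset + vtable_length * vtableEntry::size_in_bytes()
     //     -> itableOffsetEntry[0] { interface, offset }
     //        itableOffsetEntry[1] { interface, offset }
     //        ...                                  (terminated by a null interface entry)
     //   The matching entry's 'offset' locates, relative to recv_klass, the array of
     //   itableMethodEntry for that interface; method_result is loaded from
     //   entry[itable_index].method.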
2751 void MacroAssembler::lookup_interface_method(Register           recv_klass,
2752                                              Register           intf_klass,
2753                                              RegisterOrConstant itable_index,
2754                                              Register           method_result,
2755                                              Register           temp1_reg,
2756                                              Label&             no_such_interface,
2757                                              bool               return_method) {
2758 
2759   const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
2760   const Register itable_entry_addr = Z_R1_scratch;
2761   const Register itable_interface = Z_R0_scratch;
2762 
2763   BLOCK_COMMENT("lookup_interface_method {");
2764 
2765   // Load start of itable entries into itable_entry_addr.
2766   z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
2767   z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
2768 
2769   // Loop over all itable entries until desired interfaceOop(Rinterface) found.
2770   add2reg_with_index(itable_entry_addr,
2771                      in_bytes(Klass::vtable_start_offset() + itableOffsetEntry::interface_offset()),
2772                      recv_klass, vtable_len);
2773 
2774   const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
2775   Label     search;
2776 
2777   bind(search);
2778 
2779   // Handle IncompatibleClassChangeError.
2780   // If the entry is null then we've reached the end of the table
2781   // without finding the expected interface, so throw an exception.
2782   load_and_test_long(itable_interface, Address(itable_entry_addr));
2783   z_bre(no_such_interface);
2784 
2785   add2reg(itable_entry_addr, itable_offset_search_inc);
2786   z_cgr(itable_interface, intf_klass);
2787   z_brne(search);
2788 
2789   // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
2790   if (return_method) {
2791     const int vtable_offset_offset = in_bytes(itableOffsetEntry::offset_offset() -
2792                                               itableOffsetEntry::interface_offset()) -
2793                                      itable_offset_search_inc;
2794 
2795     // Compute the itableMethodEntry and get method and entry point.
2796     // We use addressing with index and displacement, since the formula
2797     // for computing the entry's offset has a fixed and a dynamic part.
2798     // The latter depends on the matched interface entry and on whether
2799     // the itable index has been passed as a register or as a constant value.
2800     int method_offset = in_bytes(itableMethodEntry::method_offset());
2801                              // Fixed part (displacement), common operand.
2802     Register itable_offset = method_result;  // Dynamic part (index register).
2803 
2804     if (itable_index.is_register()) {
2805        // Compute the method's offset in that register; for the formula,
2806        // see the else-clause below.
2807        z_sllg(itable_offset, itable_index.as_register(), exact_log2(itableMethodEntry::size() * wordSize));
2808        z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
2809     } else {
2810       // Displacement increases.
2811       method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
2812 
2813       // Load the vtable offset from the itable entry.
2814       z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
2815     }
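         // The load below therefore computes (sketch):
         //   method_result = *(recv_klass + itable_offset + method_offset)
         // where itable_offset holds the interface's vtable offset (plus the scaled
         // itable_index if it was passed in a register) and method_offset is the
         // constant part of the displacement.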
2816 
2817     // Finally load the Method*.
2818     z_lg(method_result, method_offset, itable_offset, recv_klass);
2819   }
2820   BLOCK_COMMENT("} lookup_interface_method");
2821 }
2822 
2823 // Lookup for virtual method invocation.
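     // In effect (sketch): method_result = recv_klass->vtable[vtable_index].method, i.e. a
     // load from recv_klass + vtable_start_offset + vtable_index * wordSize + method_offset.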
2824 void MacroAssembler::lookup_virtual_method(Register           recv_klass,
2825                                            RegisterOrConstant vtable_index,
2826                                            Register           method_result) {
2827   assert_different_registers(recv_klass, vtable_index.register_or_noreg());
2828   assert(vtableEntry::size() * wordSize == wordSize,
2829          "else adjust the scaling in the code below");
2830 
2831   BLOCK_COMMENT("lookup_virtual_method {");
2832 
2833   const int base = in_bytes(Klass::vtable_start_offset());
2834 
2835   if (vtable_index.is_constant()) {
2836     // Load with base + disp.
2837     Address vtable_entry_addr(recv_klass,
2838                               vtable_index.as_constant() * wordSize +
2839                               base +
2840                               in_bytes(vtableEntry::method_offset()));
2841 
2842     z_lg(method_result, vtable_entry_addr);
2843   } else {
2844     // Shift index properly and load with base + index + disp.
2845     Register vindex = vtable_index.as_register();
2846     Address  vtable_entry_addr(recv_klass, vindex,
2847                                base + in_bytes(vtableEntry::method_offset()));
2848 
2849     z_sllg(vindex, vindex, exact_log2(wordSize));
2850     z_lg(method_result, vtable_entry_addr);
2851   }
2852   BLOCK_COMMENT("} lookup_virtual_method");
2853 }
2854 
2855 // Factor out code to call ic_miss_handler.
2856 // Generate code to call the inline cache miss handler.
2857 //
2858 // In most cases, this code will be generated out-of-line.
2859 // The method parameters are intended to provide some variability.
2860 //   ICM          - Label which has to be bound to the start of useful code (past any traps).
2861 //   trapMarker   - Marking byte for the generated illtrap instructions (if any).
2862 //                  Any value except 0x00 is supported.
2863 //                  = 0x00 - do not generate illtrap instructions;
2864 //                           use nops to fill unused space instead.
2865 //   requiredSize - required size of the generated code. If the actually
2866 //                  generated code is smaller, use padding instructions to fill up.
2867 //                  = 0 - no size requirement, no padding.
2868 //   scratch      - scratch register to hold branch target address.
2869 //
2870 //  The method returns the code offset of the bound label.
2871 unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
2872   intptr_t startOffset = offset();
2873 
2874   // Prevent entry at content_begin().
2875   if (trapMarker != 0) {
2876     z_illtrap(trapMarker);
2877   }
2878 
2879   // Load address of inline cache miss code into scratch register
2880   // and branch to cache miss handler.
2881   BLOCK_COMMENT("IC miss handler {");
2882   BIND(ICM);
2883   unsigned int   labelOffset = offset();
2884   AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
2885 
2886   load_const_optimized(scratch, icmiss);
2887   z_br(scratch);
2888 
2889   // Fill unused space.
2890   if (requiredSize > 0) {
2891     while ((offset() - startOffset) < requiredSize) {
2892       if (trapMarker == 0) {
2893         z_nop();
2894       } else {
2895         z_illtrap(trapMarker);
2896       }
2897     }
2898   }
2899   BLOCK_COMMENT("} IC miss handler");
2900   return labelOffset;
2901 }
2902 
2903 void MacroAssembler::nmethod_UEP(Label& ic_miss) {
2904   Register ic_reg       = Z_inline_cache;
2905   int      klass_offset = oopDesc::klass_offset_in_bytes();
2906   if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
2907     if (VM_Version::has_CompareBranch()) {
2908       z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
2909     } else {
2910       z_ltgr(Z_ARG1, Z_ARG1);
2911       z_bre(ic_miss);
2912     }
2913   }
2914   // Compare cached class against klass from receiver.
2915   compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
2916   z_brne(ic_miss);
2917 }
2918 
2919 void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
2920                                                    Register   super_klass,
2921                                                    Register   temp1_reg,
2922                                                    Label*     L_success,
2923                                                    Label*     L_failure,
2924                                                    Label*     L_slow_path,
2925                                                    RegisterOrConstant super_check_offset) {
2926 
2927   const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
2928   const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2929 
2930   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2931   bool need_slow_path = (must_load_sco ||
2932                          super_check_offset.constant_or_zero() == sc_offset);
2933 
2934   // Input registers must not overlap.
2935   assert_different_registers(sub_klass, super_klass, temp1_reg);
2936   if (super_check_offset.is_register()) {
2937     assert_different_registers(sub_klass, super_klass,
2938                                super_check_offset.as_register());
2939   } else if (must_load_sco) {
2940     assert(temp1_reg != noreg, "supply either a temp or a register offset");
2941   }
2942 
2943   const Register Rsuper_check_offset = temp1_reg;
2944 
2945   NearLabel L_fallthrough;
2946   int label_nulls = 0;
2947   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
2948   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
2949   if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
2950   assert(label_nulls <= 1 ||
2951          (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
2952          "at most one null in the batch, usually");
2953 
2954   BLOCK_COMMENT("check_klass_subtype_fast_path {");
2955   // If the pointers are equal, we are done (e.g., String[] elements).
2956   // This self-check enables sharing of secondary supertype arrays among
2957   // non-primary types such as array-of-interface. Otherwise, each such
2958   // type would need its own customized SSA.
2959   // We move this check to the front of the fast path because many
2960   // type checks are in fact trivially successful in this manner,
2961   // so we get a nicely predicted branch right at the start of the check.
2962   compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);
2963 
2964   // Check the supertype display, which is uint.
2965   if (must_load_sco) {
2966     z_llgf(Rsuper_check_offset, sco_offset, super_klass);
2967     super_check_offset = RegisterOrConstant(Rsuper_check_offset);
2968   }
2969   Address super_check_addr(sub_klass, super_check_offset, 0);
2970   z_cg(super_klass, super_check_addr); // compare w/ displayed supertype
2971 
2972   // This check has worked decisively for primary supers.
2973   // Secondary supers are sought in the super_cache ('super_cache_addr').
2974   // (Secondary supers are interfaces and very deeply nested subtypes.)
2975   // This works in the same check above because of a tricky aliasing
2976   // between the super_cache and the primary super display elements.
2977   // (The 'super_check_addr' can address either, as the case requires.)
2978   // Note that the cache is updated below if it does not help us find
2979   // what we need immediately.
2980   // So if it was a primary super, we can just fail immediately.
2981   // Otherwise, it's the slow path for us (no success at this point).
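       // Put differently (sketch): when super_check_offset equals sc_offset, the CG above
       // compared against the secondary_super_cache slot, so "equal" means a cached hit and
       // "not equal" merely means "ask the slow path". For any other offset the slot is part
       // of the primary supers display and the comparison is decisive either way.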
2982 
2983   // Hacked jmp, which may only be used just before L_fallthrough.
2984 #define final_jmp(label)                                                \
2985   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
2986   else                            { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/
2987 
2988   if (super_check_offset.is_register()) {
2989     branch_optimized(Assembler::bcondEqual, *L_success);
2990     z_cfi(super_check_offset.as_register(), sc_offset);
2991     if (L_failure == &L_fallthrough) {
2992       branch_optimized(Assembler::bcondEqual, *L_slow_path);
2993     } else {
2994       branch_optimized(Assembler::bcondNotEqual, *L_failure);
2995       final_jmp(*L_slow_path);
2996     }
2997   } else if (super_check_offset.as_constant() == sc_offset) {
2998     // Need a slow path; fast failure is impossible.
2999     if (L_slow_path == &L_fallthrough) {
3000       branch_optimized(Assembler::bcondEqual, *L_success);
3001     } else {
3002       branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
3003       final_jmp(*L_success);
3004     }
3005   } else {
3006     // No slow path; it's a fast decision.
3007     if (L_failure == &L_fallthrough) {
3008       branch_optimized(Assembler::bcondEqual, *L_success);
3009     } else {
3010       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3011       final_jmp(*L_success);
3012     }
3013   }
3014 
3015   bind(L_fallthrough);
3016 #undef local_brc
3017 #undef final_jmp
3018   BLOCK_COMMENT("} check_klass_subtype_fast_path");
3019   // fallthru (to slow path)
3020 }
3021 
3022 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3023                                                    Register Rsuperklass,
3024                                                    Register Rarray_ptr,  // tmp
3025                                                    Register Rlength,     // tmp
3026                                                    Label* L_success,
3027                                                    Label* L_failure) {
3028   // Input registers must not overlap.
3029   // Also check for R1 which is explicitly used here.
3030   assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3031   NearLabel L_fallthrough;
3032   int label_nulls = 0;
3033   if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
3034   if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
3035   assert(label_nulls <= 1, "at most one null in the batch");
3036 
3037   const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3038   const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3039 
3040   const int length_offset = Array<Klass*>::length_offset_in_bytes();
3041   const int base_offset   = Array<Klass*>::base_offset_in_bytes();
3042 
3043   // Hacked jmp, which may only be used just before L_fallthrough.
3044 #define final_jmp(label)                                                \
3045   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3046   else                            branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3047 
3048   NearLabel loop_iterate, loop_count, match;
3049 
3050   BLOCK_COMMENT("check_klass_subtype_slow_path {");
3051   z_lg(Rarray_ptr, ss_offset, Rsubklass);
3052 
3053   load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3054   branch_optimized(Assembler::bcondZero, *L_failure);
3055 
3056   // Klass pointers in the table are no longer compressed.
3057   z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3058   z_bre(match);                               // Shortcut for array length = 1.
3059 
3060   // No match yet, so we must walk the array's elements.
3061   z_lngfr(Rlength, Rlength);
3062   z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3063   z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
3064   add2reg(Rlength, 2 * BytesPerWord);        // start index  = -(n-2)*BytesPerWord
3065   z_slgr(Rarray_ptr, Rlength);               // start addr: +=  (n-2)*BytesPerWord
3066   z_bru(loop_count);
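       // Loop bookkeeping (sketch): after the setup above, Rarray_ptr points (n-2) words past
       // the array base and Rlength holds -(n-2)*BytesPerWord. BRXLG adds Z_R1 (BytesPerWord)
       // to Rlength and branches while Rlength <= Z_R1 (an odd R3 acts as both increment and
       // comparand), so loop_iterate visits elements 1 .. n-1; element 0 was already checked
       // by the shortcut above.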
3067 
3068   BIND(loop_iterate);
3069   z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3070   z_bre(match);
3071   BIND(loop_count);
3072   z_brxlg(Rlength, Z_R1, loop_iterate);
3073 
3074   // Rsuperklass not found among secondary super classes -> failure.
3075   branch_optimized(Assembler::bcondAlways, *L_failure);
3076 
3077   // Got a hit. Return success (zero result). Set cache.
3078   // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3079 
3080   BIND(match);
3081 
3082   z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3083 
3084   final_jmp(*L_success);
3085 
3086   // Exit to the surrounding code.
3087   BIND(L_fallthrough);
3088 #undef local_brc
3089 #undef final_jmp
3090   BLOCK_COMMENT("} check_klass_subtype_slow_path");
3091 }
3092 
3093 // Emitter for combining fast and slow path.
3094 void MacroAssembler::check_klass_subtype(Register sub_klass,
3095                                          Register super_klass,
3096                                          Register temp1_reg,
3097                                          Register temp2_reg,
3098                                          Label&   L_success) {
3099   NearLabel failure;
3100   BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3101   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3102                                 &L_success, &failure, nullptr);
3103   check_klass_subtype_slow_path(sub_klass, super_klass,
3104                                 temp1_reg, temp2_reg, &L_success, nullptr);
3105   BIND(failure);
3106   BLOCK_COMMENT("} check_klass_subtype");
3107 }
3108 
3109 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
3110   assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
3111 
3112   Label L_fallthrough;
3113   if (L_fast_path == nullptr) {
3114     L_fast_path = &L_fallthrough;
3115   } else if (L_slow_path == nullptr) {
3116     L_slow_path = &L_fallthrough;
3117   }
3118 
3119   // Fast path check: class is fully initialized
3120   z_cli(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3121   z_bre(*L_fast_path);
3122 
3123   // Fast path check: current thread is initializer thread
3124   z_cg(thread, Address(klass, InstanceKlass::init_thread_offset()));
3125   if (L_slow_path == &L_fallthrough) {
3126     z_bre(*L_fast_path);
3127   } else if (L_fast_path == &L_fallthrough) {
3128     z_brne(*L_slow_path);
3129   } else {
3130     Unimplemented();
3131   }
3132 
3133   bind(L_fallthrough);
3134 }
3135 
3136 // Increment a counter at counter_address when the eq condition code is
3137 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3138 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3139   Label l;
3140   z_brne(l);
3141   load_const(tmp1_reg, counter_address);
3142   add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3143   z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3144   bind(l);
3145 }
3146 
3147 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2) {
3148   Register displacedHeader = temp1;
3149   Register currentHeader = temp1;
3150   Register temp = temp2;
3151   NearLabel done, object_has_monitor;
3152 
3153   BLOCK_COMMENT("compiler_fast_lock_object {");
3154 
3155   // Load markWord from oop into mark.
3156   z_lg(displacedHeader, 0, oop);
3157 
3158   if (DiagnoseSyncOnValueBasedClasses != 0) {
3159     load_klass(Z_R1_scratch, oop);
3160     z_l(Z_R1_scratch, Address(Z_R1_scratch, Klass::access_flags_offset()));
3161     assert((JVM_ACC_IS_VALUE_BASED_CLASS & 0xFFFF) == 0, "or change following instruction");
3162     z_nilh(Z_R1_scratch, JVM_ACC_IS_VALUE_BASED_CLASS >> 16);
3163     z_brne(done);
3164   }
3165 
3166   // Handle existing monitor.
3167   // The object has an existing monitor iff (mark & monitor_value) != 0.
3168   guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
3169   z_lgr(temp, displacedHeader);
3170   z_nill(temp, markWord::monitor_value);
3171   z_brne(object_has_monitor);
3172 
3173   if (LockingMode == LM_MONITOR) {
3174     // Set NE to indicate 'failure' -> take slow-path
3175     z_ltgr(oop, oop);
3176     z_bru(done);
3177   } else if (LockingMode == LM_LEGACY) {
3178     // Set mark to markWord | markWord::unlocked_value.
3179     z_oill(displacedHeader, markWord::unlocked_value);
3180 
3181     // Load Compare Value application register.
3182 
3183     // Initialize the box (must happen before we update the object mark).
3184     z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
3185 
3186     // Memory Fence (in cmpxchgd)
3187     // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
3188 
3189     // If the compare-and-swap succeeded, then we found an unlocked object and we
3190     // have now locked it.
3191     z_csg(displacedHeader, box, 0, oop);
3192     assert(currentHeader == displacedHeader, "must be same register"); // displacedHeader and currentHeader deliberately alias the same register.
3193     z_bre(done);
3194 
3195     // We did not see an unlocked object so try the fast recursive case.
3196 
3197     z_sgr(currentHeader, Z_SP);
3198     load_const_optimized(temp, (~(os::vm_page_size() - 1) | markWord::lock_mask_in_place));
3199 
3200     z_ngr(currentHeader, temp);
3201     //   z_brne(done);
3202     //   z_release();
3203     z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
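         // (Sketch of the recursive case: currentHeader now holds markWord - SP. If that
         // difference is smaller than a page and has clear lock bits, the mark is a stack
         // address close to our own SP, i.e. a recursive stack lock by this thread. The NGR
         // above leaves 0 (CC=EQ, success) or non-zero (CC=NE, slow path), and the STG
         // records that value in the box's displaced header slot.)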
3204 
3205     z_bru(done);
3206   } else {
3207     assert(LockingMode == LM_LIGHTWEIGHT, "must be");
3208     lightweight_lock(oop, displacedHeader, temp, done);
3209     z_bru(done);
3210   }
3211 
3212   Register zero = temp;
3213   Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
3214   bind(object_has_monitor);
3215   // The object's monitor m is unlocked iff m->owner is null,
3216   // otherwise m->owner may contain a thread or a stack address.
3217   //
3218   // Try to CAS m->owner from null to current thread.
3219   z_lghi(zero, 0);
3220   // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3221   z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3222   if (LockingMode != LM_LIGHTWEIGHT) {
3223     // Store a non-null value into the box.
3224     z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3225   }
3226 #ifdef ASSERT
3227   z_brne(done);
3228   // We've acquired the monitor, check some invariants.
3229   // Invariant 1: _recursions should be 0.
3230   asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
3231                           "monitor->_recursions should be 0", -1);
3232   z_ltgr(zero, zero); // Set CR=EQ.
3233 #endif
3234   bind(done);
3235 
3236   BLOCK_COMMENT("} compiler_fast_lock_object");
3237   // If locking was successful, CR should indicate 'EQ'.
3238   // The compiler or the native wrapper generates a branch to the runtime call
3239   // _complete_monitor_locking_Java.
3240 }
3241 
3242 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2) {
3243   Register displacedHeader = temp1;
3244   Register currentHeader = temp2;
3245   Register temp = temp1;
3246   Register monitor = temp2;
3247 
3248   const int hdr_offset = oopDesc::mark_offset_in_bytes();
3249 
3250   Label done, object_has_monitor;
3251 
3252   BLOCK_COMMENT("compiler_fast_unlock_object {");
3253 
3254   if (LockingMode == LM_LEGACY) {
3255     // Find the lock address and load the displaced header from the stack.
3256     // if the displaced header is zero, we have a recursive unlock.
3257     load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3258     z_bre(done);
3259   }
3260 
3261   // Handle existing monitor.
3262   // The object has an existing monitor iff (mark & monitor_value) != 0.
3263   z_lg(currentHeader, hdr_offset, oop);
3264   guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
3265   if (LockingMode == LM_LIGHTWEIGHT) {
3266     z_lgr(temp, currentHeader);
3267   }
3268   z_nill(currentHeader, markWord::monitor_value);
3269   z_brne(object_has_monitor);
3270 
3271   if (LockingMode == LM_MONITOR) {
3272     // Set NE to indicate 'failure' -> take slow-path
3273     z_ltgr(oop, oop);
3274     z_bru(done);
3275   } else if (LockingMode == LM_LEGACY) {
3276     // Check if it is still a light weight lock, this is true if we see
3277     // the stack address of the basicLock in the markWord of the object
3278     // copy box to currentHeader such that csg does not kill it.
3279     z_lgr(currentHeader, box);
3280     z_csg(currentHeader, displacedHeader, 0, oop);
3281     z_bru(done); // csg sets CR as desired.
3282   } else {
3283     assert(LockingMode == LM_LIGHTWEIGHT, "must be");
3284 
3285     // Don't load currentHeader again from the stack top after the monitor check,
3286     // as it is possible some other thread modified it in the meantime.
3287     // currentHeader is altered, but its contents were copied into temp as well.
3288     lightweight_unlock(oop, temp, currentHeader, done);
3289     z_bru(done);
3290   }
3291 
3292   // In case of LM_LIGHTWEIGHT, we may reach here with (temp & ObjectMonitor::ANONYMOUS_OWNER) != 0.
3293   // This is handled like owner thread mismatches: We take the slow path.
3294 
3295   // Handle existing monitor.
3296   bind(object_has_monitor);
3297   z_lg(currentHeader, hdr_offset, oop);    // CurrentHeader is tagged with monitor_value set.
3298   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3299   z_brne(done);
3300   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3301   z_brne(done);
3302   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3303   z_brne(done);
3304   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3305   z_brne(done);
3306   z_release();
3307   z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3308 
3309   bind(done);
3310 
3311   BLOCK_COMMENT("} compiler_fast_unlock_object");
3312   // flag == EQ indicates success
3313   // flag == NE indicates failure
3314 }
3315 
3316 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3317   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3318   bs->resolve_jobject(this, value, tmp1, tmp2);
3319 }
3320 
3321 // Last_Java_sp must comply with the rules in frame_s390.hpp.
3322 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3323   BLOCK_COMMENT("set_last_Java_frame {");
3324 
3325   // Always set last_Java_pc and flags first because once last_Java_sp
3326   // is visible, has_last_Java_frame is true and users will look at the
3327   // rest of the fields. (Note: flags should always be zero before we
3328   // get here, so they don't need to be set.)
3329 
3330   // Verify that last_Java_pc was zeroed on return to Java.
3331   if (allow_relocation) {
3332     asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3333                             Z_thread,
3334                             "last_Java_pc not zeroed before leaving Java",
3335                             0x200);
3336   } else {
3337     asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3338                                    Z_thread,
3339                                    "last_Java_pc not zeroed before leaving Java",
3340                                    0x200);
3341   }
3342 
3343   // When returning from calling out from Java mode the frame anchor's
3344   // last_Java_pc will always be set to null. It is set here so that
3345   // if we are doing a call to native (not VM) that we capture the
3346   // known pc and don't have to rely on the native call having a
3347   // standard frame linkage where we can find the pc.
3348   if (last_Java_pc!=noreg) {
3349     z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3350   }
3351 
3352   // This membar release is not required on z/Architecture, since the sequence of stores
3353   // is maintained. Nevertheless, we leave it in to document the required ordering.
3354   // The implementation of z_release() should be empty.
3355   // z_release();
3356 
3357   z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3358   BLOCK_COMMENT("} set_last_Java_frame");
3359 }
3360 
3361 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3362   BLOCK_COMMENT("reset_last_Java_frame {");
3363 
3364   if (allow_relocation) {
3365     asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3366                                Z_thread,
3367                                "SP was not set, still zero",
3368                                0x202);
3369   } else {
3370     asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3371                                       Z_thread,
3372                                       "SP was not set, still zero",
3373                                       0x202);
3374   }
3375 
3376   // _last_Java_sp = 0
3377   // Clearing storage must be atomic here, so don't use clear_mem()!
3378   store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3379 
3380   // _last_Java_pc = 0
3381   store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3382 
3383   BLOCK_COMMENT("} reset_last_Java_frame");
3384   return;
3385 }
3386 
3387 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3388   assert_different_registers(sp, tmp1);
3389 
3390   // We cannot trust that code generated by the C++ compiler saves R14
3391   // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3392   // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3393   // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3394   // it into the frame anchor.
3395   get_PC(tmp1);
3396   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3397 }
3398 
3399 void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3400   z_release();
3401 
3402   assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3403   assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3404   store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3405 }
3406 
3407 void MacroAssembler::get_vm_result(Register oop_result) {
3408   z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3409   clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3410 
3411   verify_oop(oop_result, FILE_AND_LINE);
3412 }
3413 
3414 void MacroAssembler::get_vm_result_2(Register result) {
3415   z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3416   clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3417 }
3418 
3419 // We require that C code which does not return a value in vm_result will
3420 // leave it undisturbed.
3421 void MacroAssembler::set_vm_result(Register oop_result) {
3422   z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3423 }
3424 
3425 // Explicit null checks (used for method handle code).
3426 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3427   if (!ImplicitNullChecks) {
3428     NearLabel ok;
3429 
3430     compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3431 
3432     // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3433     address exception_entry = Interpreter::throw_NullPointerException_entry();
3434     load_absolute_address(reg, exception_entry);
3435     z_br(reg);
3436 
3437     bind(ok);
3438   } else {
3439     if (needs_explicit_null_check((intptr_t)offset)) {
3440       // Provoke OS null exception if reg is null by
3441       // accessing M[reg] w/o changing any registers.
3442       z_lg(tmp, 0, reg);
3443     }
3444     // else
3445       // Nothing to do, (later) access of M[reg + offset]
3446       // will provoke OS null exception if reg is null.
3447   }
3448 }
3449 
3450 //-------------------------------------
3451 //  Compressed Klass Pointers
3452 //-------------------------------------
3453 
3454 // Klass oop manipulations if compressed.
3455 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3456   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3457   address  base    = CompressedKlassPointers::base();
3458   int      shift   = CompressedKlassPointers::shift();
3459   bool     need_zero_extend = base != nullptr;
3460   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3461 
3462   BLOCK_COMMENT("cKlass encoder {");
3463 
3464 #ifdef ASSERT
3465   Label ok;
3466   z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3467   z_brc(Assembler::bcondAllZero, ok);
3468   // The plain disassembler does not recognize illtrap. It instead displays
3469   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3470   // the proper beginning of the next instruction.
3471   z_illtrap(0xee);
3472   z_illtrap(0xee);
3473   bind(ok);
3474 #endif
3475 
3476   // Scale down the incoming klass pointer first.
3477   // We then can be sure we calculate an offset that fits into 32 bit.
3478   // More generally speaking: all subsequent calculations are purely 32-bit.
3479   if (shift != 0) {
3480     assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
3481     z_srlg(dst, current, shift);
3482     current = dst;
3483   }
3484 
3485   if (base != nullptr) {
3486     // Use scaled-down base address parts to match scaled-down klass pointer.
3487     unsigned int base_h = ((unsigned long)base)>>(32+shift);
3488     unsigned int base_l = (unsigned int)(((unsigned long)base)>>shift);
3489 
3490     // General considerations:
3491     //  - when calculating (current_h - base_h), all digits must cancel (become 0).
3492     //    Otherwise, we would end up with a compressed klass pointer which doesn't
3493     //    fit into 32-bit.
3494     //  - Only bit#33 of the difference could potentially be non-zero. For that
3495     //    to happen, (current_l < base_l) must hold. In this case, the subtraction
3496     //    will create a borrow out of bit#32, nicely killing bit#33.
3497     //  - With the above, we only need to consider current_l and base_l to
3498     //    calculate the result.
3499     //  - Both values are treated as unsigned. The unsigned subtraction is
3500     //    replaced by adding (unsigned) the 2's complement of the subtrahend.
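         //
         //  Illustration with hypothetical values (shift = 0 for brevity):
         //    base   = 0x0000001020000000  -> base_h = 0x10, base_l = 0x20000000
         //    klass  = 0x0000001076543210
         //    result = klass - base = 0x56543210, which fits into 32 bits:
         //    the high words cancel and (current_l - base_l) produces no borrow.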
3501 
3502     if (base_l == 0) {
3503       //  - By theory, the calculation to be performed here (current_h - base_h) MUST
3504       //    cancel all high-word bits. Otherwise, we would end up with an offset
3505       //    (i.e. compressed klass pointer) that does not fit into 32 bit.
3506       //  - current_l remains unchanged.
3507       //  - Therefore, we can replace all calculation with just a
3508       //    zero-extending load 32 to 64 bit.
3509       //  - Even that can be replaced with a conditional load if dst != current.
3510       //    (this is a local view. The shift step may have requested zero-extension).
3511     } else {
3512       if ((base_h == 0) && is_uimm(base_l, 31)) {
3513         // If we happen to find that (base_h == 0), and that base_l is within the range
3514         // which can be represented by a signed int, then we can use 64bit signed add with
3515         // (-base_l) as 32bit signed immediate operand. The add will take care of the
3516         // upper 32 bits of the result, saving us the need of an extra zero extension.
3517         // For base_l to be in the required range, it must not have the most significant
3518         // bit (aka sign bit) set.
3519         lgr_if_needed(dst, current); // no zero/sign extension in this case!
3520         z_agfi(dst, -(int)base_l);   // base_l must be passed as signed.
3521         need_zero_extend = false;
3522         current = dst;
3523       } else {
3524         // To begin with, we may need to copy and/or zero-extend the register operand.
3525         // We have to calculate (current_l - base_l). Because there is no unsigned
3526         // subtract instruction with immediate operand, we add the 2's complement of base_l.
3527         if (need_zero_extend) {
3528           z_llgfr(dst, current);
3529           need_zero_extend = false;
3530         } else {
3531           llgfr_if_needed(dst, current);
3532         }
3533         current = dst;
3534         z_alfi(dst, -base_l);
3535       }
3536     }
3537   }
3538 
3539   if (need_zero_extend) {
3540     // We must zero-extend the calculated result. It may have some leftover bits in
3541     // the hi-word because we only did optimized calculations.
3542     z_llgfr(dst, current);
3543   } else {
3544     llgfr_if_needed(dst, current); // zero-extension while copying comes at no extra cost.
3545   }
3546 
3547   BLOCK_COMMENT("} cKlass encoder");
3548 }
3549 
3550 // This function calculates the size of the code generated by
3551 //   decode_klass_not_null(register dst, Register src)
3552 // when Universe::heap() isn't null. Hence, if the instructions
3553 // it generates change, then this method needs to be updated.
3554 int MacroAssembler::instr_size_for_decode_klass_not_null() {
3555   address  base    = CompressedKlassPointers::base();
3556   int shift_size   = CompressedKlassPointers::shift() == 0 ? 0 : 6; /* sllg */
3557   int addbase_size = 0;
3558   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3559 
3560   if (base != nullptr) {
3561     unsigned int base_h = ((unsigned long)base)>>32;
3562     unsigned int base_l = (unsigned int)((unsigned long)base);
3563     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3564       addbase_size += 6; /* aih */
3565     } else if ((base_h == 0) && (base_l != 0)) {
3566       addbase_size += 6; /* algfi */
3567     } else {
3568       addbase_size += load_const_size();
3569       addbase_size += 4; /* algr */
3570     }
3571   }
3572 #ifdef ASSERT
3573   addbase_size += 10;
3574   addbase_size += 2; // Extra sigill.
3575 #endif
3576   return addbase_size + shift_size;
3577 }
3578 
3579 // !!! If the instructions that get generated here change
3580 //     then function instr_size_for_decode_klass_not_null()
3581 //     needs to get updated.
3582 // This variant of decode_klass_not_null() must generate predictable code!
3583 // The code must only depend on globally known parameters.
3584 void MacroAssembler::decode_klass_not_null(Register dst) {
3585   address  base    = CompressedKlassPointers::base();
3586   int      shift   = CompressedKlassPointers::shift();
3587   int      beg_off = offset();
3588   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3589 
3590   BLOCK_COMMENT("cKlass decoder (const size) {");
3591 
3592   if (shift != 0) { // Shift required?
3593     z_sllg(dst, dst, shift);
3594   }
3595   if (base != nullptr) {
3596     unsigned int base_h = ((unsigned long)base)>>32;
3597     unsigned int base_l = (unsigned int)((unsigned long)base);
3598     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3599       z_aih(dst, base_h);     // Base has no set bits in lower half.
3600     } else if ((base_h == 0) && (base_l != 0)) {
3601       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3602     } else {
3603       load_const(Z_R0, base); // Base has set bits everywhere.
3604       z_algr(dst, Z_R0);
3605     }
3606   }
3607 
3608 #ifdef ASSERT
3609   Label ok;
3610   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3611   z_brc(Assembler::bcondAllZero, ok);
3612   // The plain disassembler does not recognize illtrap. It instead displays
3613   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3614   // the proper beginning of the next instruction.
3615   z_illtrap(0xd1);
3616   z_illtrap(0xd1);
3617   bind(ok);
3618 #endif
3619   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
3620 
3621   BLOCK_COMMENT("} cKlass decoder (const size)");
3622 }
3623 
3624 // This variant of decode_klass_not_null() is for cases where
3625 //  1) the size of the generated instructions may vary
3626 //  2) the result is (potentially) stored in a register different from the source.
3627 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3628   address base  = CompressedKlassPointers::base();
3629   int     shift = CompressedKlassPointers::shift();
3630   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3631 
3632   BLOCK_COMMENT("cKlass decoder {");
3633 
3634   if (src == noreg) src = dst;
3635 
3636   if (shift != 0) { // Shift or at least move required?
3637     z_sllg(dst, src, shift);
3638   } else {
3639     lgr_if_needed(dst, src);
3640   }
3641 
3642   if (base != nullptr) {
3643     unsigned int base_h = ((unsigned long)base)>>32;
3644     unsigned int base_l = (unsigned int)((unsigned long)base);
3645     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3646       z_aih(dst, base_h);     // Base has no set bits in lower half.
3647     } else if ((base_h == 0) && (base_l != 0)) {
3648       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3649     } else {
3650       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
3651       z_algr(dst, Z_R0);
3652     }
3653   }
3654 
3655 #ifdef ASSERT
3656   Label ok;
3657   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3658   z_brc(Assembler::bcondAllZero, ok);
3659   // The plain disassembler does not recognize illtrap. It instead displays
3660   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3661   // the proper beginning of the next instruction.
3662   z_illtrap(0xd2);
3663   z_illtrap(0xd2);
3664   bind(ok);
3665 #endif
3666   BLOCK_COMMENT("} cKlass decoder");
3667 }
3668 
3669 void MacroAssembler::load_klass(Register klass, Address mem) {
3670   if (UseCompressedClassPointers) {
3671     z_llgf(klass, mem);
3672     // Attention: no null check here!
3673     decode_klass_not_null(klass);
3674   } else {
3675     z_lg(klass, mem);
3676   }
3677 }
3678 
3679 void MacroAssembler::load_klass(Register klass, Register src_oop) {
3680   if (UseCompressedClassPointers) {
3681     z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3682     // Attention: no null check here!
3683     decode_klass_not_null(klass);
3684   } else {
3685     z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3686   }
3687 }
3688 
3689 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
3690   if (UseCompressedClassPointers) {
3691     assert_different_registers(dst_oop, klass, Z_R0);
3692     if (ck == noreg) ck = klass;
3693     encode_klass_not_null(ck, klass);
3694     z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
3695   } else {
3696     z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
3697   }
3698 }
3699 
3700 void MacroAssembler::store_klass_gap(Register s, Register d) {
3701   if (UseCompressedClassPointers) {
3702     assert(s != d, "not enough registers");
3703     // Support s = noreg.
3704     if (s != noreg) {
3705       z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
3706     } else {
3707       z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
3708     }
3709   }
3710 }
3711 
3712 // Compare klass ptr in memory against klass ptr in register.
3713 //
3714 // Rop1            - klass in register, always uncompressed.
3715 // disp            - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
3716 // Rbase           - Base address of cKlass in memory.
3717 // maybenull       - True if Rop1 possibly is a null.
3718 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybenull) {
3719 
3720   BLOCK_COMMENT("compare klass ptr {");
3721 
3722   if (UseCompressedClassPointers) {
3723     const int shift = CompressedKlassPointers::shift();
3724     address   base  = CompressedKlassPointers::base();
3725 
3726     assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
3727     assert_different_registers(Rop1, Z_R0);
3728     assert_different_registers(Rop1, Rbase, Z_R1);
3729 
3730     // First encode register oop and then compare with cOop in memory.
3731     // This sequence saves an unnecessary cOop load and decode.
3732     if (base == nullptr) {
3733       if (shift == 0) {
3734         z_cl(Rop1, disp, Rbase);     // Unscaled
3735       } else {
3736         z_srlg(Z_R0, Rop1, shift);   // ZeroBased
3737         z_cl(Z_R0, disp, Rbase);
3738       }
3739     } else {                         // HeapBased
3740 #ifdef ASSERT
3741       bool     used_R0 = true;
3742       bool     used_R1 = true;
3743 #endif
3744       Register current = Rop1;
3745       Label    done;
3746 
3747       if (maybenull) {       // null pointer must be preserved!
3748         z_ltgr(Z_R0, current);
3749         z_bre(done);
3750         current = Z_R0;
3751       }
3752 
3753       unsigned int base_h = ((unsigned long)base)>>32;
3754       unsigned int base_l = (unsigned int)((unsigned long)base);
3755       if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3756         lgr_if_needed(Z_R0, current);
3757         z_aih(Z_R0, -((int)base_h));     // Base has no set bits in lower half.
3758       } else if ((base_h == 0) && (base_l != 0)) {
3759         lgr_if_needed(Z_R0, current);
3760         z_agfi(Z_R0, -(int)base_l);
3761       } else {
3762         int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
3763         add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
3764       }
3765 
3766       if (shift != 0) {
3767         z_srlg(Z_R0, Z_R0, shift);
3768       }
3769       bind(done);
3770       z_cl(Z_R0, disp, Rbase);
3771 #ifdef ASSERT
3772       if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
3773       if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
3774 #endif
3775     }
3776   } else {
3777     z_clg(Rop1, disp, Z_R0, Rbase);
3778   }
3779   BLOCK_COMMENT("} compare klass ptr");
3780 }
3781 
3782 //---------------------------
3783 //  Compressed oops
3784 //---------------------------
3785 
3786 void MacroAssembler::encode_heap_oop(Register oop) {
3787   oop_encoder(oop, oop, true /*maybe null*/);
3788 }
3789 
3790 void MacroAssembler::encode_heap_oop_not_null(Register oop) {
3791   oop_encoder(oop, oop, false /*not null*/);
3792 }
3793 
3794 // Called with something derived from the oop base. e.g. oop_base>>3.
3795 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {
3796   unsigned int oop_base_ll = ((unsigned int)(oop_base >>  0)) & 0xffff;
3797   unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;
3798   unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;
3799   unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;
3800   unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)
3801                                + (oop_base_lh == 0 ? 0:1)
3802                                + (oop_base_hl == 0 ? 0:1)
3803                                + (oop_base_hh == 0 ? 0:1);
3804 
3805   assert(oop_base != 0, "This is for HeapBased cOops only");
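       // Illustration with a hypothetical (already shifted) base of 0x3FFFFF000:
       //   oop_base_ll = 0xf000 -> pow2_offset = 0x1000; 0x3FFFFF000 + 0x1000 = 0x400000000
       //   has a single non-zero 16-bit part, so -0x1000 is returned and the caller can
       //   materialize the power-of-2 base cheaply, compensating with the offset later.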
3806 
3807   if (n_notzero_parts != 1) { //  Check if oop_base is just a few pages shy of a power of 2.
3808     uint64_t pow2_offset = 0x10000 - oop_base_ll;
3809     if (pow2_offset < 0x8000) {  // This might not be necessary.
3810       uint64_t oop_base2 = oop_base + pow2_offset;
3811 
3812       oop_base_ll = ((unsigned int)(oop_base2 >>  0)) & 0xffff;
3813       oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;
3814       oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;
3815       oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;
3816       n_notzero_parts = (oop_base_ll == 0 ? 0:1) +
3817                         (oop_base_lh == 0 ? 0:1) +
3818                         (oop_base_hl == 0 ? 0:1) +
3819                         (oop_base_hh == 0 ? 0:1);
3820       if (n_notzero_parts == 1) {
3821         assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");
3822         return -pow2_offset;
3823       }
3824     }
3825   }
3826   return 0;
3827 }
3828 
3829 // If base address is offset from a straight power of two by just a few pages,
3830 // return this offset to the caller for a possible later composite add.
3831 // TODO/FIX: will only work correctly for 4k pages.
3832 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {
3833   int pow2_offset = get_oop_base_pow2_offset(oop_base);
3834 
3835   load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.
3836 
3837   return pow2_offset;
3838 }
3839 
3840 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
3841   int offset = get_oop_base(Rbase, oop_base);
3842   z_lcgr(Rbase, Rbase);
3843   return -offset;
3844 }
3845 
3846 // Compare compressed oop in memory against oop in register.
3847 // Rop1            - Oop in register.
3848 // disp            - Offset of cOop in memory.
3849 // Rbase           - Base address of cOop in memory.
3850 // maybenull       - True if Rop1 possibly is a null.
3852 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybenull) {
3853   Register Rbase  = mem.baseOrR0();
3854   Register Rindex = mem.indexOrR0();
3855   int64_t  disp   = mem.disp();
3856 
3857   const int shift = CompressedOops::shift();
3858   address   base  = CompressedOops::base();
3859 
3860   assert(UseCompressedOops, "must be on to call this method");
3861   assert(Universe::heap() != nullptr, "java heap must be initialized to call this method");
3862   assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
3863   assert_different_registers(Rop1, Z_R0);
3864   assert_different_registers(Rop1, Rbase, Z_R1);
3865   assert_different_registers(Rop1, Rindex, Z_R1);
3866 
3867   BLOCK_COMMENT("compare heap oop {");
3868 
3869   // First encode register oop and then compare with cOop in memory.
3870   // This sequence saves an unnecessary cOop load and decode.
3871   if (base == nullptr) {
3872     if (shift == 0) {
3873       z_cl(Rop1, disp, Rindex, Rbase);  // Unscaled
3874     } else {
3875       z_srlg(Z_R0, Rop1, shift);        // ZeroBased
3876       z_cl(Z_R0, disp, Rindex, Rbase);
3877     }
3878   } else {                              // HeapBased
3879 #ifdef ASSERT
3880     bool  used_R0 = true;
3881     bool  used_R1 = true;
3882 #endif
3883     Label done;
3884     int   pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
3885 
3886     if (maybenull) {       // null pointer must be preserved!
3887       z_ltgr(Z_R0, Rop1);
3888       z_bre(done);
3889     }
3890 
3891     add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
3892     z_srlg(Z_R0, Z_R0, shift);
3893 
3894     bind(done);
3895     z_cl(Z_R0, disp, Rindex, Rbase);
3896 #ifdef ASSERT
3897     if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
3898     if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
3899 #endif
3900   }
3901   BLOCK_COMMENT("} compare heap oop");
3902 }
3903 
3904 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
3905                                      const Address& addr, Register val,
3906                                      Register tmp1, Register tmp2, Register tmp3) {
3907   assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
3908                          ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
3909   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3910   decorators = AccessInternal::decorator_fixup(decorators, type);
3911   bool as_raw = (decorators & AS_RAW) != 0;
3912   if (as_raw) {
3913     bs->BarrierSetAssembler::store_at(this, decorators, type,
3914                                       addr, val,
3915                                       tmp1, tmp2, tmp3);
3916   } else {
3917     bs->store_at(this, decorators, type,
3918                  addr, val,
3919                  tmp1, tmp2, tmp3);
3920   }
3921 }
3922 
3923 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
3924                                     const Address& addr, Register dst,
3925                                     Register tmp1, Register tmp2, Label *is_null) {
3926   assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
3927                          ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");
3928   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3929   decorators = AccessInternal::decorator_fixup(decorators, type);
3930   bool as_raw = (decorators & AS_RAW) != 0;
3931   if (as_raw) {
3932     bs->BarrierSetAssembler::load_at(this, decorators, type,
3933                                      addr, dst,
3934                                      tmp1, tmp2, is_null);
3935   } else {
3936     bs->load_at(this, decorators, type,
3937                 addr, dst,
3938                 tmp1, tmp2, is_null);
3939   }
3940 }
3941 
3942 void MacroAssembler::load_heap_oop(Register dest, const Address &a,
3943                                    Register tmp1, Register tmp2,
3944                                    DecoratorSet decorators, Label *is_null) {
3945   access_load_at(T_OBJECT, IN_HEAP | decorators, a, dest, tmp1, tmp2, is_null);
3946 }
3947 
3948 void MacroAssembler::store_heap_oop(Register Roop, const Address &a,
3949                                     Register tmp1, Register tmp2, Register tmp3,
3950                                     DecoratorSet decorators) {
3951   access_store_at(T_OBJECT, IN_HEAP | decorators, a, Roop, tmp1, tmp2, tmp3);
3952 }
3953 
3954 //-------------------------------------------------
3955 // Encode compressed oop. Generally usable encoder.
3956 //-------------------------------------------------
3957 // Rsrc - contains regular oop on entry. It remains unchanged.
3958 // Rdst - contains compressed oop on exit.
// Rdst and Rsrc may indicate the same register; in that case, Rsrc is modified.
3960 //
3961 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.
3962 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.
3963 //
// only32bitValid is set if later code uses only the lower 32 bits. In that
// case we need not clear the upper 32 bits.
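//
// The logical operation performed (hedged sketch; the emitted code varies with the
// heap layout and register constraints):
//
//   if (maybenull && Rsrc == nullptr)  Rdst = 0;
//   else                               Rdst = (narrowOop)(((uintptr_t)Rsrc - (uintptr_t)oop_base) >> oop_shift);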
3966 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybenull,
3967                                  Register Rbase, int pow2_offset, bool only32bitValid) {
3968 
3969   const address oop_base  = CompressedOops::base();
3970   const int     oop_shift = CompressedOops::shift();
3971   const bool    disjoint  = CompressedOops::base_disjoint();
3972 
3973   assert(UseCompressedOops, "must be on to call this method");
3974   assert(Universe::heap() != nullptr, "java heap must be initialized to call this encoder");
3975   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
3976 
3977   if (disjoint || (oop_base == nullptr)) {
3978     BLOCK_COMMENT("cOop encoder zeroBase {");
3979     if (oop_shift == 0) {
3980       if (oop_base != nullptr && !only32bitValid) {
3981         z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
3982       } else {
3983         lgr_if_needed(Rdst, Rsrc);
3984       }
3985     } else {
3986       z_srlg(Rdst, Rsrc, oop_shift);
3987       if (oop_base != nullptr && !only32bitValid) {
3988         z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
3989       }
3990     }
3991     BLOCK_COMMENT("} cOop encoder zeroBase");
3992     return;
3993   }
3994 
3995   bool used_R0 = false;
3996   bool used_R1 = false;
3997 
3998   BLOCK_COMMENT("cOop encoder general {");
3999   assert_different_registers(Rdst, Z_R1);
4000   assert_different_registers(Rsrc, Rbase);
4001   if (maybenull) {
4002     Label done;
4003     // We reorder shifting and subtracting, so that we can compare
4004     // and shift in parallel:
4005     //
4006     // cycle 0:  potential LoadN, base = <const>
4007     // cycle 1:  base = !base     dst = src >> 3,    cmp cr = (src != 0)
4008     // cycle 2:  if (cr) br,      dst = dst + base + offset
4009 
4010     // Get oop_base components.
4011     if (pow2_offset == -1) {
4012       if (Rdst == Rbase) {
4013         if (Rdst == Z_R1 || Rsrc == Z_R1) {
4014           Rbase = Z_R0;
4015           used_R0 = true;
4016         } else {
4017           Rdst = Z_R1;
4018           used_R1 = true;
4019         }
4020       }
4021       if (Rbase == Z_R1) {
4022         used_R1 = true;
4023       }
4024       pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
4025     }
4026     assert_different_registers(Rdst, Rbase);
4027 
4028     // Check for null oop (must be left alone) and shift.
4029     if (oop_shift != 0) {  // Shift out alignment bits
4030       if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
4031         z_srag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4032       } else {
4033         z_srlg(Rdst, Rsrc, oop_shift);
4034         z_ltgr(Rsrc, Rsrc);  // This is the recommended way of testing for zero.
        // z_cghi(Rsrc, 0) might seem faster because it does not write a register, but it is not.
4037       }
4038     } else {
4039       z_ltgr(Rdst, Rsrc);   // Move null to result register.
4040     }
4041     z_bre(done);
4042 
4043     // Subtract oop_base components.
4044     if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
4045       z_algr(Rdst, Rbase);
4046       if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
4047     } else {
4048       add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
4049     }
4050     if (!only32bitValid) {
4051       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4052     }
4053     bind(done);
4054 
4055   } else {  // not null
4056     // Get oop_base components.
4057     if (pow2_offset == -1) {
4058       pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
4059     }
4060 
4061     // Subtract oop_base components and shift.
4062     if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
4063       // Don't use lay instruction.
4064       if (Rdst == Rsrc) {
4065         z_algr(Rdst, Rbase);
4066       } else {
4067         lgr_if_needed(Rdst, Rbase);
4068         z_algr(Rdst, Rsrc);
4069       }
4070       if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
4071     } else {
4072       add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
4073     }
4074     if (oop_shift != 0) {   // Shift out alignment bits.
4075       z_srlg(Rdst, Rdst, oop_shift);
4076     }
4077     if (!only32bitValid) {
4078       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4079     }
4080   }
4081 #ifdef ASSERT
4082   if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
4083   if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
4084 #endif
4085   BLOCK_COMMENT("} cOop encoder general");
4086 }
4087 
4088 //-------------------------------------------------
// Decode compressed oop. Generally usable decoder.
4090 //-------------------------------------------------
4091 // Rsrc - contains compressed oop on entry.
4092 // Rdst - contains regular oop on exit.
4093 // Rdst and Rsrc may indicate same register.
// Rdst can be the same register as Rbase; then, either Z_R0 or Z_R1 must be available as scratch.
// Rsrc must not be the same register as Rbase if Rbase was preloaded (before the call).
4096 // Rbase - register to use for the base
4097 // pow2_offset - offset of base to nice value. If -1, base must be loaded.
4098 // For performance, it is good to
4099 //  - avoid Z_R0 for any of the argument registers.
4100 //  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
4101 //  - avoid Z_R1 for Rdst if Rdst == Rbase.
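//
// The logical operation performed (hedged sketch; the emitted code varies with the
// heap layout and register constraints):
//
//   if (maybenull && Rsrc == 0)  Rdst = nullptr;
//   else                         Rdst = (oop)(((uintptr_t)Rsrc << oop_shift) + (uintptr_t)oop_base);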
4102 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybenull, Register Rbase, int pow2_offset) {
4103 
4104   const address oop_base  = CompressedOops::base();
4105   const int     oop_shift = CompressedOops::shift();
4106   const bool    disjoint  = CompressedOops::base_disjoint();
4107 
4108   assert(UseCompressedOops, "must be on to call this method");
4109   assert(Universe::heap() != nullptr, "java heap must be initialized to call this decoder");
4110   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
4111          "cOop encoder detected bad shift");
4112 
4113   // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.
4114 
4115   if (oop_base != nullptr) {
4116     unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
4117     unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
4118     unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
4119     if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
4120       BLOCK_COMMENT("cOop decoder disjointBase {");
4121       // We do not need to load the base. Instead, we can install the upper bits
4122       // with an OR instead of an ADD.
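      // Example (hypothetical layout): with oop_base = 0x0000080000000000 and oop_shift = 3,
      // a narrow oop n decodes to (n << 3) | 0x0000080000000000. The OR suffices because
      // the shifted offset and the base have no bits in common (disjoint base).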
4123       Label done;
4124 
4125       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4126       if (maybenull) {  // null pointer must be preserved!
4127         z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4128         z_bre(done);
4129       } else {
4130         z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4131       }
4132       if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
4133         z_oihf(Rdst, oop_base_hf);
4134       } else if (oop_base_hl != 0) {
4135         z_oihl(Rdst, oop_base_hl);
4136       } else {
4137         assert(oop_base_hh != 0, "not heapbased mode");
4138         z_oihh(Rdst, oop_base_hh);
4139       }
4140       bind(done);
4141       BLOCK_COMMENT("} cOop decoder disjointBase");
4142     } else {
4143       BLOCK_COMMENT("cOop decoder general {");
4144       // There are three decode steps:
4145       //   scale oop offset (shift left)
4146       //   get base (in reg) and pow2_offset (constant)
4147       //   add base, pow2_offset, and oop offset
4148       // The following register overlap situations may exist:
4149       // Rdst == Rsrc,  Rbase any other
4150       //   not a problem. Scaling in-place leaves Rbase undisturbed.
4151       //   Loading Rbase does not impact the scaled offset.
4152       // Rdst == Rbase, Rsrc  any other
4153       //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
4154       //   would destroy the scaled offset.
4155       //   Remedy: use Rdst_tmp if Rbase has been preloaded.
4156       //           use Rbase_tmp if base has to be loaded.
4157       // Rsrc == Rbase, Rdst  any other
4158       //   Only possible without preloaded Rbase.
4159       //   Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.
4160       // Rsrc == Rbase, Rdst == Rbase
4161       //   Only possible without preloaded Rbase.
4162       //   Loading Rbase would destroy compressed oop. Scaling in-place is ok.
4163       //   Remedy: use Rbase_tmp.
4164       //
4165       Label    done;
4166       Register Rdst_tmp       = Rdst;
4167       Register Rbase_tmp      = Rbase;
4168       bool     used_R0        = false;
4169       bool     used_R1        = false;
4170       bool     base_preloaded = pow2_offset >= 0;
4171       guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
4172       assert(oop_shift != 0, "room for optimization");
4173 
4174       // Check if we need to use scratch registers.
4175       if (Rdst == Rbase) {
4176         assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
4177         if (Rdst != Rsrc) {
4178           if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4179           else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4180         } else {
4181           Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
4182         }
4183       }
4184       if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
4185 
4186       // Scale oop and check for null.
4187       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4188       if (maybenull) {  // null pointer must be preserved!
4189         z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4190         z_bre(done);
4191       } else {
4192         z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4193       }
4194 
4195       // Get oop_base components.
4196       if (!base_preloaded) {
4197         pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
4198       }
4199 
4200       // Add up all components.
4201       if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
4202         z_algr(Rdst_tmp, Rbase_tmp);
4203         if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
4204       } else {
4205         add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
4206       }
4207 
4208       bind(done);
4209       lgr_if_needed(Rdst, Rdst_tmp);
4210 #ifdef ASSERT
4211       if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
4212       if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
4213 #endif
4214       BLOCK_COMMENT("} cOop decoder general");
4215     }
4216   } else {
4217     BLOCK_COMMENT("cOop decoder zeroBase {");
4218     if (oop_shift == 0) {
4219       lgr_if_needed(Rdst, Rsrc);
4220     } else {
4221       z_sllg(Rdst, Rsrc, oop_shift);
4222     }
4223     BLOCK_COMMENT("} cOop decoder zeroBase");
4224   }
4225 }
4226 
4227 // ((OopHandle)result).resolve();
4228 void MacroAssembler::resolve_oop_handle(Register result) {
4229   // OopHandle::resolve is an indirection.
4230   z_lg(result, 0, result);
4231 }
4232 
4233 void MacroAssembler::load_mirror_from_const_method(Register mirror, Register const_method) {
4234   mem2reg_opt(mirror, Address(const_method, ConstMethod::constants_offset()));
4235   mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset()));
4236   mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
4237   resolve_oop_handle(mirror);
4238 }
4239 
4240 void MacroAssembler::load_method_holder(Register holder, Register method) {
4241   mem2reg_opt(holder, Address(method, Method::const_offset()));
4242   mem2reg_opt(holder, Address(holder, ConstMethod::constants_offset()));
4243   mem2reg_opt(holder, Address(holder, ConstantPool::pool_holder_offset()));
4244 }
4245 
4246 //---------------------------------------------------------------
4247 //---  Operations on arrays.
4248 //---------------------------------------------------------------
4249 
4250 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4251 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4252 // work registers anyway.
// Actually, only Z_R0, Z_R1 (the work registers), and odd_tmp_reg are killed.
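//
// Conceptually (hedged C sketch of the effect, not the emitted code):
//
//   void clear_array(unsigned long cnt_dw, void* base) {
//     memset(base, 0, cnt_dw * 8);   // cnt_dw doublewords of 8 bytes each
//   }
//
// Short arrays (<= 256 bytes) are cleared with a single EXECUTE'd XC (xor to self),
// longer arrays with MVCLE using a zero-length source and padding byte 0.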
4254 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg) {
4255 
4256   int      block_start = offset();
4257   Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
4258   Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
4259 
4260   Label doXC, doMVCLE, done;
4261 
4262   BLOCK_COMMENT("Clear_Array {");
4263 
4264   // Check for zero len and convert to long.
4265   z_ltgfr(odd_tmp_reg, cnt_arg);
4266   z_bre(done);                    // Nothing to do if len == 0.
4267 
4268   // Prefetch data to be cleared.
4269   if (VM_Version::has_Prefetch()) {
4270     z_pfd(0x02,   0, Z_R0, base_pointer_arg);
4271     z_pfd(0x02, 256, Z_R0, base_pointer_arg);
4272   }
4273 
4274   z_sllg(dst_len, odd_tmp_reg, 3); // #bytes to clear.
4275   z_cghi(odd_tmp_reg, 32);         // Check for len <= 256 bytes (<=32 DW).
4276   z_brnh(doXC);                    // If so, use executed XC to clear.
4277 
4278   // MVCLE: initialize long arrays (general case).
4279   bind(doMVCLE);
4280   z_lgr(dst_addr, base_pointer_arg);
4281   // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
4282   // The even register of the register pair is not killed.
4283   clear_reg(odd_tmp_reg, true, false);
4284   MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding()-1), 0);
4285   z_bru(done);
4286 
4287   // XC: initialize short arrays.
4288   Label XC_template; // Instr template, never exec directly!
4289     bind(XC_template);
4290     z_xc(0,0,base_pointer_arg,0,base_pointer_arg);
4291 
4292   bind(doXC);
4293     add2reg(dst_len, -1);               // Get #bytes-1 for EXECUTE.
4294     if (VM_Version::has_ExecuteExtensions()) {
4295       z_exrl(dst_len, XC_template);     // Execute XC with var. len.
4296     } else {
4297       z_larl(odd_tmp_reg, XC_template);
4298       z_ex(dst_len,0,Z_R0,odd_tmp_reg); // Execute XC with var. len.
4299     }
4300     // z_bru(done);      // fallthru
4301 
4302   bind(done);
4303 
4304   BLOCK_COMMENT("} Clear_Array");
4305 
4306   int block_end = offset();
4307   return block_end - block_start;
4308 }
4309 
4310 // Compiler ensures base is doubleword aligned and cnt is count of doublewords.
4311 // Emitter does not KILL any arguments nor work registers.
4312 // Emitter generates up to 16 XC instructions, depending on the array length.
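//
// The XC count is ceil(cnt*8 / 256), i.e. at most 16 for the 4096-byte limit below.
// Hedged sketch of the emitted pattern for a hypothetical cnt*8 = 600 bytes
// (prefetch instructions omitted):
//
//   XC 0(256,base),0(base)       // clear bytes   0..255
//   XC 256(256,base),256(base)   // clear bytes 256..511
//   XC 512(88,base),512(base)    // clear the remaining 88 bytes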
4313 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
4314   int  block_start    = offset();
4315   int  off;
4316   int  lineSize_Bytes = AllocatePrefetchStepSize;
4317   int  lineSize_DW    = AllocatePrefetchStepSize>>LogBytesPerWord;
4318   bool doPrefetch     = VM_Version::has_Prefetch();
4319   int  XC_maxlen      = 256;
4320   int  numXCInstr     = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;
4321 
4322   BLOCK_COMMENT("Clear_Array_Const {");
4323   assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");
4324 
4325   // Do less prefetching for very short arrays.
4326   if (numXCInstr > 0) {
4327     // Prefetch only some cache lines, then begin clearing.
4328     if (doPrefetch) {
4329       if (cnt*BytesPerWord <= lineSize_Bytes/4) {  // If less than 1/4 of a cache line to clear,
4330         z_pfd(0x02, 0, Z_R0, base);                // prefetch just the first cache line.
4331       } else {
4332         assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
4333         for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {
4334           z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);
4335         }
4336       }
4337     }
4338 
4339     for (off=0; off<(numXCInstr-1); off++) {
4340       z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);
4341 
4342       // Prefetch some cache lines in advance.
4343       if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {
4344         z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);
4345       }
4346     }
4347     if (off*XC_maxlen < cnt*BytesPerWord) {
4348       z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);
4349     }
4350   }
4351   BLOCK_COMMENT("} Clear_Array_Const");
4352 
4353   int block_end = offset();
4354   return block_end - block_start;
4355 }
4356 
4357 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4358 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4359 // work registers anyway.
// Actually, only Z_R0, Z_R1 (the work registers), and odd_tmp_reg are killed.
4361 //
4362 // For very large arrays, exploit MVCLE H/W support.
4363 // MVCLE instruction automatically exploits H/W-optimized page mover.
4364 // - Bytes up to next page boundary are cleared with a series of XC to self.
4365 // - All full pages are cleared with the page mover H/W assist.
4366 // - Remaining bytes are again cleared by a series of XC to self.
4367 //
4368 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg) {
4369 
4370   int      block_start = offset();
4371   Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
4372   Register dst_addr = Z_R0;      // Holds dst addr for MVCLE.
4373 
4374   BLOCK_COMMENT("Clear_Array_Const_Big {");
4375 
4376   // Get len to clear.
4377   load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8
4378 
4379   // Prepare other args to MVCLE.
4380   z_lgr(dst_addr, base_pointer_arg);
4381   // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
4382   // The even register of the register pair is not killed.
4383   (void) clear_reg(odd_tmp_reg, true, false);  // Src len of MVCLE is zero.
4384   MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding() - 1), 0);
4385   BLOCK_COMMENT("} Clear_Array_Const_Big");
4386 
4387   int block_end = offset();
4388   return block_end - block_start;
4389 }
4390 
4391 // Allocator.
4392 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
4393                                                            Register cnt_reg,
4394                                                            Register tmp1_reg, Register tmp2_reg) {
4395   // Tmp1 is oddReg.
4396   // Tmp2 is evenReg.
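  //
  // Conceptually (hedged C sketch of the effect): memcpy(dst, src, cnt*8) for
  // doubleword-aligned, non-overlapping regions. Short copies (<= 256 bytes) use an
  // EXECUTE'd MVC; longer copies use MVCLE with option byte 0xb0 (cache bypass).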
4397 
4398   int block_start = offset();
4399   Label doMVC, doMVCLE, done, MVC_template;
4400 
4401   BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");
4402 
4403   // Check for zero len and convert to long.
  z_ltgfr(cnt_reg, cnt_reg);      // Sign-extend cnt to 64 bit; sets the condition code for the zero-length check.
4405   z_bre(done);                    // Nothing to do if len == 0.
4406 
4407   z_sllg(Z_R1, cnt_reg, 3);       // Dst len in bytes. calc early to have the result ready.
4408 
4409   z_cghi(cnt_reg, 32);            // Check for len <= 256 bytes (<=32 DW).
4410   z_brnh(doMVC);                  // If so, use executed MVC to clear.
4411 
4412   bind(doMVCLE);                  // A lot of data (more than 256 bytes).
4413   // Prep dest reg pair.
4414   z_lgr(Z_R0, dst_reg);           // dst addr
4415   // Dst len already in Z_R1.
4416   // Prep src reg pair.
4417   z_lgr(tmp2_reg, src_reg);       // src addr
4418   z_lgr(tmp1_reg, Z_R1);          // Src len same as dst len.
4419 
4420   // Do the copy.
4421   move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
4422   z_bru(done);                         // All done.
4423 
4424   bind(MVC_template);             // Just some data (not more than 256 bytes).
4425   z_mvc(0, 0, dst_reg, 0, src_reg);
4426 
4427   bind(doMVC);
4428 
4429   if (VM_Version::has_ExecuteExtensions()) {
4430     add2reg(Z_R1, -1);
4431   } else {
4432     add2reg(tmp1_reg, -1, Z_R1);
4433     z_larl(Z_R1, MVC_template);
4434   }
4435 
4436   if (VM_Version::has_Prefetch()) {
4437     z_pfd(1,  0,Z_R0,src_reg);
4438     z_pfd(2,  0,Z_R0,dst_reg);
4439     //    z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
4440     //    z_pfd(2,256,Z_R0,dst_reg);
4441   }
4442 
4443   if (VM_Version::has_ExecuteExtensions()) {
4444     z_exrl(Z_R1, MVC_template);
4445   } else {
4446     z_ex(tmp1_reg, 0, Z_R0, Z_R1);
4447   }
4448 
4449   bind(done);
4450 
4451   BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");
4452 
4453   int block_end = offset();
4454   return block_end - block_start;
4455 }
4456 
4457 //-------------------------------------------------
4458 //   Constants (scalar and oop) in constant pool
4459 //-------------------------------------------------
4460 
4461 // Add a non-relocated constant to the CP.
4462 int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
4463   long    value  = val.value();
4464   address tocPos = long_constant(value);
4465 
4466   if (tocPos != nullptr) {
4467     int tocOffset = (int)(tocPos - code()->consts()->start());
4468     return tocOffset;
4469   }
  // long_constant() returned null, so no constant entry has been created.
  // In that case, we return a "fatal" offset, in case subsequently
  // generated access code is ever executed.
4473   return -1;
4474 }
4475 
4476 // Returns the TOC offset where the address is stored.
4477 // Add a relocated constant to the CP.
4478 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
4479   // Use RelocationHolder::none for the constant pool entry.
4480   // Otherwise we will end up with a failing NativeCall::verify(x),
4481   // where x is the address of the constant pool entry.
4482   address tocPos = address_constant((address)oop.value(), RelocationHolder::none);
4483 
4484   if (tocPos != nullptr) {
4485     int              tocOffset = (int)(tocPos - code()->consts()->start());
4486     RelocationHolder rsp = oop.rspec();
4487     Relocation      *rel = rsp.reloc();
4488 
4489     // Store toc_offset in relocation, used by call_far_patchable.
4490     if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
4491       ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
4492     }
4493     // Relocate at the load's pc.
4494     relocate(rsp);
4495 
4496     return tocOffset;
4497   }
  // Address_constant returned null, so no constant entry has been created.
  // In that case, we return a "fatal" offset, in case subsequently
  // generated access code is ever executed.
4501   return -1;
4502 }
4503 
4504 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
4505   int     tocOffset = store_const_in_toc(a);
4506   if (tocOffset == -1) return false;
4507   address tocPos    = tocOffset + code()->consts()->start();
4508   assert((address)code()->consts()->start() != nullptr, "Please add CP address");
4509   relocate(a.rspec());
4510   load_long_pcrelative(dst, tocPos);
4511   return true;
4512 }
4513 
4514 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
4515   int     tocOffset = store_oop_in_toc(a);
4516   if (tocOffset == -1) return false;
4517   address tocPos    = tocOffset + code()->consts()->start();
4518   assert((address)code()->consts()->start() != nullptr, "Please add CP address");
4519 
4520   load_addr_pcrelative(dst, tocPos);
4521   return true;
4522 }
4523 
4524 // If the instruction sequence at the given pc is a load_const_from_toc
4525 // sequence, return the value currently stored at the referenced position
4526 // in the TOC.
4527 intptr_t MacroAssembler::get_const_from_toc(address pc) {
4528 
4529   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
4530 
4531   long    offset  = get_load_const_from_toc_offset(pc);
4532   address dataLoc = nullptr;
4533   if (is_load_const_from_toc_pcrelative(pc)) {
4534     dataLoc = pc + offset;
4535   } else {
4536     CodeBlob* cb = CodeCache::find_blob(pc);
4537     assert(cb && cb->is_nmethod(), "sanity");
4538     nmethod* nm = (nmethod*)cb;
4539     dataLoc = nm->ctable_begin() + offset;
4540   }
4541   return *(intptr_t *)dataLoc;
4542 }
4543 
4544 // If the instruction sequence at the given pc is a load_const_from_toc
4545 // sequence, copy the passed-in new_data value into the referenced
4546 // position in the TOC.
4547 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
4548   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
4549 
4550   long    offset = MacroAssembler::get_load_const_from_toc_offset(pc);
4551   address dataLoc = nullptr;
4552   if (is_load_const_from_toc_pcrelative(pc)) {
4553     dataLoc = pc+offset;
4554   } else {
4555     nmethod* nm = CodeCache::find_nmethod(pc);
4556     assert((cb == nullptr) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
4557     dataLoc = nm->ctable_begin() + offset;
4558   }
4559   if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
4560     *(unsigned long *)dataLoc = new_data;
4561   }
4562 }
4563 
4564 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc
4565 // site. Verify by calling is_load_const_from_toc() before!!
4566 // Offset is +/- 2**32 -> use long.
4567 long MacroAssembler::get_load_const_from_toc_offset(address a) {
4568   assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
4569   //  expected code sequence:
4570   //    z_lgrl(t, simm32);    len = 6
4571   unsigned long inst;
4572   unsigned int  len = get_instruction(a, &inst);
4573   return get_pcrel_offset(inst);
4574 }
4575 
4576 //**********************************************************************************
4577 //  inspection of generated instruction sequences for a particular pattern
4578 //**********************************************************************************
4579 
4580 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
4581 #ifdef ASSERT
4582   unsigned long inst;
4583   unsigned int  len = get_instruction(a+2, &inst);
4584   if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
4585     const int range = 128;
4586     Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
4587     VM_Version::z_SIGSEGV();
4588   }
4589 #endif
4590   // expected code sequence:
4591   //   z_lgrl(t, relAddr32);    len = 6
4592   //TODO: verify accessed data is in CP, if possible.
4593   return is_load_pcrelative_long(a);  // TODO: might be too general. Currently, only lgrl is used.
4594 }
4595 
4596 bool MacroAssembler::is_load_const_from_toc_call(address a) {
4597   return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
4598 }
4599 
4600 bool MacroAssembler::is_load_const_call(address a) {
4601   return is_load_const(a) && is_call_byregister(a + load_const_size());
4602 }
4603 
4604 //-------------------------------------------------
//   Emitters for some really CISC instructions
4606 //-------------------------------------------------
4607 
4608 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
4609   assert(dst->encoding()%2==0, "must be an even/odd register pair");
4610   assert(src->encoding()%2==0, "must be an even/odd register pair");
4611   assert(pad<256, "must be a padding BYTE");
4612 
4613   Label retry;
4614   bind(retry);
4615   Assembler::z_mvcle(dst, src, pad);
4616   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4617 }
4618 
4619 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
4620   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
4621   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
4622   assert(pad<256, "must be a padding BYTE");
4623 
4624   Label retry;
4625   bind(retry);
4626   Assembler::z_clcle(left, right, pad, Z_R0);
4627   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4628 }
4629 
4630 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
4631   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
4632   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
4633   assert(pad<=0xfff, "must be a padding HALFWORD");
4634   assert(VM_Version::has_ETF2(), "instruction must be available");
4635 
4636   Label retry;
4637   bind(retry);
4638   Assembler::z_clclu(left, right, pad, Z_R0);
4639   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4640 }
4641 
4642 void MacroAssembler::search_string(Register end, Register start) {
4643   assert(end->encoding() != 0, "end address must not be in R0");
4644   assert(start->encoding() != 0, "start address must not be in R0");
4645 
4646   Label retry;
4647   bind(retry);
4648   Assembler::z_srst(end, start);
4649   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4650 }
4651 
4652 void MacroAssembler::search_string_uni(Register end, Register start) {
4653   assert(end->encoding() != 0, "end address must not be in R0");
4654   assert(start->encoding() != 0, "start address must not be in R0");
4655   assert(VM_Version::has_ETF3(), "instruction must be available");
4656 
4657   Label retry;
4658   bind(retry);
4659   Assembler::z_srstu(end, start);
4660   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4661 }
4662 
4663 void MacroAssembler::kmac(Register srcBuff) {
4664   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4665   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
4666 
4667   Label retry;
4668   bind(retry);
4669   Assembler::z_kmac(Z_R0, srcBuff);
4670   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4671 }
4672 
4673 void MacroAssembler::kimd(Register srcBuff) {
4674   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4675   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
4676 
4677   Label retry;
4678   bind(retry);
4679   Assembler::z_kimd(Z_R0, srcBuff);
4680   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4681 }
4682 
4683 void MacroAssembler::klmd(Register srcBuff) {
4684   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4685   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
4686 
4687   Label retry;
4688   bind(retry);
4689   Assembler::z_klmd(Z_R0, srcBuff);
4690   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4691 }
4692 
4693 void MacroAssembler::km(Register dstBuff, Register srcBuff) {
4694   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
4695   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
4696   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4697   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
4698   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4699 
4700   Label retry;
4701   bind(retry);
4702   Assembler::z_km(dstBuff, srcBuff);
4703   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4704 }
4705 
4706 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {
4707   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
4708   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
4709   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4710   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
4711   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4712 
4713   Label retry;
4714   bind(retry);
4715   Assembler::z_kmc(dstBuff, srcBuff);
4716   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4717 }
4718 
4719 void MacroAssembler::kmctr(Register dstBuff, Register ctrBuff, Register srcBuff) {
4720   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
4721   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
4722   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4723   assert(dstBuff->encoding()     != 0, "dst buffer address can't be in Z_R0");
4724   assert(ctrBuff->encoding()     != 0, "ctr buffer address can't be in Z_R0");
4725   assert(ctrBuff->encoding() % 2 == 0, "ctr buffer addr must be an even register");
4726   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
4727   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4728 
4729   Label retry;
4730   bind(retry);
4731   Assembler::z_kmctr(dstBuff, ctrBuff, srcBuff);
4732   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4733 }
4734 
4735 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {
4736   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4737 
4738   Label retry;
4739   bind(retry);
4740   Assembler::z_cksm(crcBuff, srcBuff);
4741   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4742 }
4743 
4744 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {
4745   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4746   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4747 
4748   Label retry;
4749   bind(retry);
4750   Assembler::z_troo(r1, r2, m3);
4751   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4752 }
4753 
4754 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {
4755   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4756   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4757 
4758   Label retry;
4759   bind(retry);
4760   Assembler::z_trot(r1, r2, m3);
4761   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4762 }
4763 
4764 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
4765   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4766   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4767 
4768   Label retry;
4769   bind(retry);
4770   Assembler::z_trto(r1, r2, m3);
4771   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4772 }
4773 
4774 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
4775   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4776   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4777 
4778   Label retry;
4779   bind(retry);
4780   Assembler::z_trtt(r1, r2, m3);
4781   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4782 }
4783 
4784 //---------------------------------------
4785 // Helpers for Intrinsic Emitters
4786 //---------------------------------------
4787 
4788 /**
4789  * uint32_t crc;
4790  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
4791  */
4792 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
4793   assert_different_registers(crc, table, tmp);
4794   assert_different_registers(val, table);
4795   if (crc == val) {      // Must rotate first to use the unmodified value.
4796     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
4797     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
4798   } else {
4799     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
4800     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
4801   }
4802   z_x(crc, Address(table, tmp, 0));
4803 }
4804 
4805 /**
4806  * uint32_t crc;
4807  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
4808  */
4809 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
4810   fold_byte_crc32(crc, crc, table, tmp);
4811 }
4812 
4813 /**
4814  * Emits code to update CRC-32 with a byte value according to constants in table.
4815  *
4816  * @param [in,out]crc Register containing the crc.
4817  * @param [in]val     Register containing the byte to fold into the CRC.
4818  * @param [in]table   Register containing the table of crc constants.
4819  *
4820  * uint32_t crc;
4821  * val = crc_table[(val ^ crc) & 0xFF];
4822  * crc = val ^ (crc >> 8);
4823  */
4824 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
4825   z_xr(val, crc);
4826   fold_byte_crc32(crc, val, table, val);
4827 }
4828 
4829 
4830 /**
4831  * @param crc   register containing existing CRC (32-bit)
4832  * @param buf   register pointing to input byte buffer (byte*)
4833  * @param len   register containing number of bytes
4834  * @param table register pointing to CRC table
4835  */
4836 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
4837   assert_different_registers(crc, buf, len, table, data);
4838 
4839   Label L_mainLoop, L_done;
4840   const int mainLoop_stepping = 1;
4841 
4842   // Process all bytes in a single-byte loop.
4843   z_ltr(len, len);
4844   z_brnh(L_done);
4845 
4846   bind(L_mainLoop);
4847     z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
4848     add2reg(buf, mainLoop_stepping);        // Advance buffer position.
4849     update_byte_crc32(crc, data, table);
4850     z_brct(len, L_mainLoop);                // Iterate.
4851 
4852   bind(L_done);
4853 }
4854 
4855 /**
4856  * Emits code to update CRC-32 with a 4-byte value according to constants in table.
4857  * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
4858  *
4859  */
4860 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
4861                                         Register t0,  Register t1,  Register t2,    Register t3) {
4862   // This is what we implement (the DOBIG4 part):
4863   //
4864   // #define DOBIG4 c ^= *++buf4; \
4865   //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
4866   //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
4867   // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
4868   // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
4869   const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
4870   const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
4871   const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
4872   const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
4873 
4874   // XOR crc with next four bytes of buffer.
4875   lgr_if_needed(t0, crc);
4876   z_x(t0, Address(buf, bufDisp));
4877   if (bufInc != 0) {
4878     add2reg(buf, bufInc);
4879   }
4880 
4881   // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
4882   rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
4883   rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
4884   rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
4885   rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2
4886 
4887   // XOR indexed table values to calculate updated crc.
4888   z_ly(t2, Address(table, t2, (intptr_t)ix1));
4889   z_ly(t0, Address(table, t0, (intptr_t)ix3));
4890   z_xy(t2, Address(table, t3, (intptr_t)ix0));
4891   z_xy(t0, Address(table, t1, (intptr_t)ix2));
4892   z_xr(t0, t2);           // Now t0 contains the updated CRC value.
4893   lgr_if_needed(crc, t0);
4894 }
4895 
4896 /**
4897  * @param crc   register containing existing CRC (32-bit)
4898  * @param buf   register pointing to input byte buffer (byte*)
4899  * @param len   register containing number of bytes
4900  * @param table register pointing to CRC table
4901  *
 * uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller!
4903  */
4904 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
4905                                         Register t0,  Register t1,  Register t2,  Register t3,
4906                                         bool invertCRC) {
4907   assert_different_registers(crc, buf, len, table);
4908 
4909   Label L_mainLoop, L_tail;
4910   Register  data = t0;
4911   Register  ctr  = Z_R0;
4912   const int mainLoop_stepping = 4;
4913   const int log_stepping      = exact_log2(mainLoop_stepping);
4914 
4915   // Don't test for len <= 0 here. This pathological case should not occur anyway.
4916   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
  // The situation itself is detected and handled correctly by the length check
  // (z_srag/z_brnh) below and by the byte loop in update_byteLoop_crc32.
4919 
4920   if (invertCRC) {
4921     not_(crc, noreg, false);           // 1s complement of crc
4922   }
4923 
4924   // Check for short (<4 bytes) buffer.
4925   z_srag(ctr, len, log_stepping);
4926   z_brnh(L_tail);
4927 
  z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
4929   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
4930 
4931   BIND(L_mainLoop);
4932     update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
4933     z_brct(ctr, L_mainLoop); // Iterate.
4934 
  z_lrvr(crc, crc);          // Reverse byte order back to original.
4936 
  // Process last few (<4) bytes of buffer.
4938   BIND(L_tail);
4939   update_byteLoop_crc32(crc, buf, len, table, data);
4940 
4941   if (invertCRC) {
4942     not_(crc, noreg, false);           // 1s complement of crc
4943   }
4944 }
4945 
4946 /**
4947  * @param crc   register containing existing CRC (32-bit)
4948  * @param buf   register pointing to input byte buffer (byte*)
4949  * @param len   register containing number of bytes
4950  * @param table register pointing to CRC table
4951  */
4952 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
4953                                         Register t0,  Register t1,  Register t2,  Register t3,
4954                                         bool invertCRC) {
4955   assert_different_registers(crc, buf, len, table);
4956   Register data = t0;
4957 
4958   if (invertCRC) {
4959     not_(crc, noreg, false);           // 1s complement of crc
4960   }
4961 
4962   update_byteLoop_crc32(crc, buf, len, table, data);
4963 
4964   if (invertCRC) {
4965     not_(crc, noreg, false);           // 1s complement of crc
4966   }
4967 }
4968 
4969 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
4970                                              bool invertCRC) {
4971   assert_different_registers(crc, buf, len, table, tmp);
4972 
4973   if (invertCRC) {
4974     not_(crc, noreg, false);           // 1s complement of crc
4975   }
4976 
4977   z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
4978   update_byte_crc32(crc, tmp, table);
4979 
4980   if (invertCRC) {
4981     not_(crc, noreg, false);           // 1s complement of crc
4982   }
4983 }
4984 
4985 void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
4986                                                 bool invertCRC) {
4987   assert_different_registers(crc, val, table);
4988 
4989   if (invertCRC) {
4990     not_(crc, noreg, false);           // 1s complement of crc
4991   }
4992 
4993   update_byte_crc32(crc, val, table);
4994 
4995   if (invertCRC) {
4996     not_(crc, noreg, false);           // 1s complement of crc
4997   }
4998 }
4999 
5000 //
5001 // Code for BigInteger::multiplyToLen() intrinsic.
5002 //
5003 
5004 // dest_lo += src1 + src2
5005 // dest_hi += carry1 + carry2
// Z_R7 is destroyed!
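//
// Conceptually (hedged sketch using a 128-bit accumulator):
//
//   unsigned __int128 acc = ((unsigned __int128)dest_hi << 64) | dest_lo;
//   acc += src1;
//   acc += src2;
//   dest_hi = (uint64_t)(acc >> 64);
//   dest_lo = (uint64_t)acc;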
5007 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
5008                                      Register src1, Register src2) {
5009   clear_reg(Z_R7);
5010   z_algr(dest_lo, src1);
5011   z_alcgr(dest_hi, Z_R7);
5012   z_algr(dest_lo, src2);
5013   z_alcgr(dest_hi, Z_R7);
5014 }
5015 
5016 // Multiply 64 bit by 64 bit first loop.
5017 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
5018                                            Register x_xstart,
5019                                            Register y, Register y_idx,
5020                                            Register z,
5021                                            Register carry,
5022                                            Register product,
5023                                            Register idx, Register kdx) {
5024   // jlong carry, x[], y[], z[];
5025   // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
5026   //   huge_128 product = y[idx] * x[xstart] + carry;
5027   //   z[kdx] = (jlong)product;
5028   //   carry  = (jlong)(product >>> 64);
5029   // }
5030   // z[xstart] = carry;
5031 
5032   Label L_first_loop, L_first_loop_exit;
5033   Label L_one_x, L_one_y, L_multiply;
5034 
5035   z_aghi(xstart, -1);
5036   z_brl(L_one_x);   // Special case: length of x is 1.
5037 
5038   // Load next two integers of x.
5039   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
5040   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
5041 
5042 
5043   bind(L_first_loop);
5044 
5045   z_aghi(idx, -1);
5046   z_brl(L_first_loop_exit);
5047   z_aghi(idx, -1);
5048   z_brl(L_one_y);
5049 
5050   // Load next two integers of y.
5051   z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
5052   mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));
5053 
5054 
5055   bind(L_multiply);
5056 
5057   Register multiplicand = product->successor();
5058   Register product_low = multiplicand;
5059 
5060   lgr_if_needed(multiplicand, x_xstart);
5061   z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
5062   clear_reg(Z_R7);
5063   z_algr(product_low, carry); // Add carry to result.
5064   z_alcgr(product, Z_R7);     // Add carry of the last addition.
5065   add2reg(kdx, -2);
5066 
5067   // Store result.
5068   z_sllg(Z_R7, kdx, LogBytesPerInt);
5069   reg2mem_opt(product_low, Address(z, Z_R7, 0));
5070   lgr_if_needed(carry, product);
5071   z_bru(L_first_loop);
5072 
5073 
5074   bind(L_one_y); // Load one 32 bit portion of y as (0,value).
5075 
5076   clear_reg(y_idx);
5077   mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);
5078   z_bru(L_multiply);
5079 
5080 
5081   bind(L_one_x); // Load one 32 bit portion of x as (0,value).
5082 
5083   clear_reg(x_xstart);
5084   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
5085   z_bru(L_first_loop);
5086 
5087   bind(L_first_loop_exit);
5088 }
5089 
5090 // Multiply 64 bit by 64 bit and add 128 bit.
5091 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
5092                                             Register z,
5093                                             Register yz_idx, Register idx,
5094                                             Register carry, Register product,
5095                                             int offset) {
5096   // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
5097   // z[kdx] = (jlong)product;
5098 
5099   Register multiplicand = product->successor();
5100   Register product_low = multiplicand;
5101 
5102   z_sllg(Z_R7, idx, LogBytesPerInt);
5103   mem2reg_opt(yz_idx, Address(y, Z_R7, offset));
5104 
5105   lgr_if_needed(multiplicand, x_xstart);
5106   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
5107   mem2reg_opt(yz_idx, Address(z, Z_R7, offset));
5108 
5109   add2_with_carry(product, product_low, carry, yz_idx);
5110 
5111   z_sllg(Z_R7, idx, LogBytesPerInt);
5112   reg2mem_opt(product_low, Address(z, Z_R7, offset));
5113 
5114 }
5115 
5116 // Multiply 128 bit by 128 bit. Unrolled inner loop.
5117 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
5118                                              Register y, Register z,
5119                                              Register yz_idx, Register idx,
5120                                              Register jdx,
5121                                              Register carry, Register product,
5122                                              Register carry2) {
5123   // jlong carry, x[], y[], z[];
5124   // int kdx = ystart+1;
5125   // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
5126   //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
5127   //   z[kdx+idx+1] = (jlong)product;
5128   //   jlong carry2 = (jlong)(product >>> 64);
5129   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
5130   //   z[kdx+idx] = (jlong)product;
5131   //   carry = (jlong)(product >>> 64);
5132   // }
5133   // idx += 2;
5134   // if (idx > 0) {
5135   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
5136   //   z[kdx+idx] = (jlong)product;
5137   //   carry = (jlong)(product >>> 64);
5138   // }
5139 
5140   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
5141 
5142   // scale the index
5143   lgr_if_needed(jdx, idx);
5144   and_imm(jdx, 0xfffffffffffffffcL);
5145   rshift(jdx, 2);
5146 
5147 
5148   bind(L_third_loop);
5149 
5150   z_aghi(jdx, -1);
5151   z_brl(L_third_loop_exit);
5152   add2reg(idx, -4);
5153 
5154   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
5155   lgr_if_needed(carry2, product);
5156 
5157   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
5158   lgr_if_needed(carry, product);
5159   z_bru(L_third_loop);
5160 
5161 
5162   bind(L_third_loop_exit);  // Handle any left-over operand parts.
5163 
5164   and_imm(idx, 0x3);
5165   z_brz(L_post_third_loop_done);
5166 
5167   Label L_check_1;
5168 
5169   z_aghi(idx, -2);
5170   z_brl(L_check_1);
5171 
5172   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
5173   lgr_if_needed(carry, product);
5174 
5175 
5176   bind(L_check_1);
5177 
5178   add2reg(idx, 0x2);
5179   and_imm(idx, 0x1);
5180   z_aghi(idx, -1);
5181   z_brl(L_post_third_loop_done);
5182 
5183   Register   multiplicand = product->successor();
5184   Register   product_low = multiplicand;
5185 
5186   z_sllg(Z_R7, idx, LogBytesPerInt);
5187   clear_reg(yz_idx);
5188   mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
5189   lgr_if_needed(multiplicand, x_xstart);
5190   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
5191   clear_reg(yz_idx);
5192   mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);
5193 
5194   add2_with_carry(product, product_low, yz_idx, carry);
5195 
5196   z_sllg(Z_R7, idx, LogBytesPerInt);
5197   reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
5198   rshift(product_low, 32);
5199 
5200   lshift(product, 32);
5201   z_ogr(product_low, product);
5202   lgr_if_needed(carry, product_low);
5203 
5204   bind(L_post_third_loop_done);
5205 }
5206 
5207 void MacroAssembler::multiply_to_len(Register x, Register xlen,
5208                                      Register y, Register ylen,
5209                                      Register z,
5210                                      Register tmp1, Register tmp2,
5211                                      Register tmp3, Register tmp4,
5212                                      Register tmp5) {
5213   ShortBranchVerifier sbv(this);
5214 
5215   assert_different_registers(x, xlen, y, ylen, z,
5216                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
5217   assert_different_registers(x, xlen, y, ylen, z,
5218                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);
5219 
5220   z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
5221 
  // In OpenJDK, the zlen argument is stored as a 32-bit value in its stack slot.
5223   Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.
5224 
5225   const Register idx = tmp1;
5226   const Register kdx = tmp2;
5227   const Register xstart = tmp3;
5228 
5229   const Register y_idx = tmp4;
5230   const Register carry = tmp5;
5231   const Register product  = Z_R0_scratch;
5232   const Register x_xstart = Z_R8;
5233 
5234   // First Loop.
5235   //
5236   //   final static long LONG_MASK = 0xffffffffL;
5237   //   int xstart = xlen - 1;
5238   //   int ystart = ylen - 1;
5239   //   long carry = 0;
  //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
5241   //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
5242   //     z[kdx] = (int)product;
5243   //     carry = product >>> 32;
5244   //   }
5245   //   z[xstart] = (int)carry;
5246   //
5247 
5248   lgr_if_needed(idx, ylen);  // idx = ylen
5249   z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
5250   clear_reg(carry);          // carry = 0
5251 
5252   Label L_done;
5253 
5254   lgr_if_needed(xstart, xlen);
5255   z_aghi(xstart, -1);
5256   z_brl(L_done);
5257 
5258   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
5259 
5260   NearLabel L_second_loop;
5261   compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);
5262 
5263   NearLabel L_carry;
5264   z_aghi(kdx, -1);
5265   z_brz(L_carry);
5266 
5267   // Store lower 32 bits of carry.
5268   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
5269   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5270   rshift(carry, 32);
5271   z_aghi(kdx, -1);
5272 
5273 
5274   bind(L_carry);
5275 
5276   // Store upper 32 bits of carry.
5277   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
5278   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5279 
5280   // Second and third (nested) loops.
5281   //
5282   // for (int i = xstart-1; i >= 0; i--) { // Second loop
5283   //   carry = 0;
5284   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
5285   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
5286   //                    (z[k] & LONG_MASK) + carry;
5287   //     z[k] = (int)product;
5288   //     carry = product >>> 32;
5289   //   }
5290   //   z[i] = (int)carry;
5291   // }
5292   //
5293   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = x_xstart
5294 
5295   const Register jdx = tmp1;
5296 
5297   bind(L_second_loop);
5298 
5299   clear_reg(carry);           // carry = 0;
5300   lgr_if_needed(jdx, ylen);   // j = ystart+1
5301 
5302   z_aghi(xstart, -1);         // i = xstart-1;
5303   z_brl(L_done);
5304 
5305   // Use free slots in the current stackframe instead of push/pop.
5306   Address zsave(Z_SP, _z_abi(carg_1));
5307   reg2mem_opt(z, zsave);
5308 
5309 
5310   Label L_last_x;
5311 
5312   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
5313   load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j
5314   z_aghi(xstart, -1);                           // i = xstart-1;
5315   z_brl(L_last_x);
5316 
5317   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
5318   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
5319 
5320 
5321   Label L_third_loop_prologue;
5322 
5323   bind(L_third_loop_prologue);
5324 
5325   Address xsave(Z_SP, _z_abi(carg_2));
5326   Address xlensave(Z_SP, _z_abi(carg_3));
5327   Address ylensave(Z_SP, _z_abi(carg_4));
5328 
5329   reg2mem_opt(x, xsave);
5330   reg2mem_opt(xstart, xlensave);
5331   reg2mem_opt(ylen, ylensave);
5332 
5333 
5334   multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
5335 
5336   mem2reg_opt(z, zsave);
5337   mem2reg_opt(x, xsave);
5338   mem2reg_opt(xlen, xlensave);   // The reloaded (already decremented) xstart acts as the decrement of the loop counter!
5339   mem2reg_opt(ylen, ylensave);
5340 
5341   add2reg(tmp3, 1, xlen);
5342   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
5343   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5344   z_aghi(tmp3, -1);
5345   z_brl(L_done);
5346 
5347   rshift(carry, 32);
5348   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
5349   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5350   z_bru(L_second_loop);
5351 
5352   // The following infrequently executed code is placed outside the loops.
5353   bind(L_last_x);
5354 
5355   clear_reg(x_xstart);
5356   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
5357   z_bru(L_third_loop_prologue);
5358 
5359   bind(L_done);
5360 
5361   z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
5362 }
5363 
5364 void MacroAssembler::asm_assert(branch_condition cond, const char* msg, int id, bool is_static) {
5365 #ifdef ASSERT
5366   Label ok;
5367   z_brc(cond, ok);
5368   is_static ? stop_static(msg, id) : stop(msg, id);
5369   bind(ok);
5370 #endif // ASSERT
5371 }
5372 
5373 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
5374 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
5375 #ifdef ASSERT
5376   asm_assert(check_equal ? bcondEqual : bcondNotEqual, msg, id);
5377 #endif // ASSERT
5378 }
5379 
5380 void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
5381                                           Register mem_base, const char* msg, int id) {
5382 #ifdef ASSERT
5383   switch (size) {
5384     case 4:
5385       load_and_test_int(Z_R0, Address(mem_base, mem_offset));
5386       break;
5387     case 8:
5388       load_and_test_long(Z_R0,  Address(mem_base, mem_offset));
5389       break;
5390     default:
5391       ShouldNotReachHere();
5392   }
5393   // If relocation is not allowed, stop_static() is called; otherwise stop() is called.
5394   asm_assert(check_equal ? bcondEqual : bcondNotEqual, msg, id, !allow_relocation);
5395 #endif // ASSERT
5396 }
5397 
5398 // Check the condition
5399 //   expected_size == FP - SP
5400 // after transformation:
5401 //   expected_size - FP + SP == 0
     // FP here is the caller's SP, which the prologue saved at offset 0 (callers_sp) of the current frame.
5402 // Destroys Register expected_size if no tmp register is passed.
5403 void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
5404 #ifdef ASSERT
5405   lgr_if_needed(tmp, expected_size);
5406   z_algr(tmp, Z_SP);
5407   z_slg(tmp, 0, Z_R0, Z_SP);
5408   asm_assert(bcondEqual, msg, id);
5409 #endif // ASSERT
5410 }
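     // A minimal usage sketch (hypothetical, effective in ASSERT builds only): after the prologue
     // has pushed a frame of 'framesize' bytes, check that SP moved by exactly that amount.
     //
     //   __ load_const_optimized(Z_R1_scratch, framesize);
     //   __ asm_assert_frame_size(Z_R1_scratch, Z_R0_scratch, "unexpected frame size", 0x5100);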
5411 
5412 // Save and restore functions: Exclude Z_R0.
5413 void MacroAssembler::save_volatile_regs(Register dst, int offset, bool include_fp, bool include_flags) {
5414   z_stmg(Z_R1, Z_R5, offset, dst); offset += 5 * BytesPerWord;
5415   if (include_fp) {
5416     z_std(Z_F0, Address(dst, offset)); offset += BytesPerWord;
5417     z_std(Z_F1, Address(dst, offset)); offset += BytesPerWord;
5418     z_std(Z_F2, Address(dst, offset)); offset += BytesPerWord;
5419     z_std(Z_F3, Address(dst, offset)); offset += BytesPerWord;
5420     z_std(Z_F4, Address(dst, offset)); offset += BytesPerWord;
5421     z_std(Z_F5, Address(dst, offset)); offset += BytesPerWord;
5422     z_std(Z_F6, Address(dst, offset)); offset += BytesPerWord;
5423     z_std(Z_F7, Address(dst, offset)); offset += BytesPerWord;
5424   }
5425   if (include_flags) {
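         // Save the current condition code as a small integer: 2 = equal, 4 = higher, 1 = lower.
         // restore_volatile_regs() re-creates the CC from this encoding with a CLI instruction.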
5426     Label done;
5427     z_mvi(Address(dst, offset), 2); // encoding: equal
5428     z_bre(done);
5429     z_mvi(Address(dst, offset), 4); // encoding: higher
5430     z_brh(done);
5431     z_mvi(Address(dst, offset), 1); // encoding: lower
5432     bind(done);
5433   }
5434 }
5435 void MacroAssembler::restore_volatile_regs(Register src, int offset, bool include_fp, bool include_flags) {
5436   z_lmg(Z_R1, Z_R5, offset, src); offset += 5 * BytesPerWord;
5437   if (include_fp) {
5438     z_ld(Z_F0, Address(src, offset)); offset += BytesPerWord;
5439     z_ld(Z_F1, Address(src, offset)); offset += BytesPerWord;
5440     z_ld(Z_F2, Address(src, offset)); offset += BytesPerWord;
5441     z_ld(Z_F3, Address(src, offset)); offset += BytesPerWord;
5442     z_ld(Z_F4, Address(src, offset)); offset += BytesPerWord;
5443     z_ld(Z_F5, Address(src, offset)); offset += BytesPerWord;
5444     z_ld(Z_F6, Address(src, offset)); offset += BytesPerWord;
5445     z_ld(Z_F7, Address(src, offset)); offset += BytesPerWord;
5446   }
5447   if (include_flags) {
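         // Comparing the saved encoding against 2 re-establishes the CC:
         // 2 compares equal, 4 compares high, 1 compares low (see the encoding in save_volatile_regs()).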
5448     z_cli(Address(src, offset), 2); // see encoding above
5449   }
5450 }
5451 
5452 // Plausibility check for oops.
5453 void MacroAssembler::verify_oop(Register oop, const char* msg) {
5454   if (!VerifyOops) return;
5455 
5456   BLOCK_COMMENT("verify_oop {");
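       // Save area: 5 GPRs (Z_R1..Z_R5), 8 FPRs (Z_F0..Z_F7), and one word for the flags encoding.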
5457   unsigned int nbytes_save = (5 + 8 + 1) * BytesPerWord;
5458   address entry_addr = StubRoutines::verify_oop_subroutine_entry_address();
5459 
5460   save_return_pc();
5461 
5462   // Push frame, but preserve flags
5463   z_lgr(Z_R0, Z_SP);
5464   z_lay(Z_SP, -((int64_t)nbytes_save + frame::z_abi_160_size), Z_SP);
5465   z_stg(Z_R0, _z_abi(callers_sp), Z_SP);
5466 
5467   save_volatile_regs(Z_SP, frame::z_abi_160_size, true, true);
5468 
5469   lgr_if_needed(Z_ARG2, oop);
5470   load_const_optimized(Z_ARG1, (address)msg);
5471   load_const_optimized(Z_R1, entry_addr);
5472   z_lg(Z_R1, 0, Z_R1);
5473   call_c(Z_R1);
5474 
5475   restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, true);
5476   pop_frame();
5477   restore_return_pc();
5478 
5479   BLOCK_COMMENT("} verify_oop ");
5480 }
5481 
5482 void MacroAssembler::verify_oop_addr(Address addr, const char* msg) {
5483   if (!VerifyOops) return;
5484 
5485   BLOCK_COMMENT("verify_oop_addr {");
5486   unsigned int nbytes_save = (5 + 8) * BytesPerWord;
5487   address entry_addr = StubRoutines::verify_oop_subroutine_entry_address();
5488 
5489   save_return_pc();
5490   unsigned int frame_size = push_frame_abi160(nbytes_save); // kills Z_R0
5491   save_volatile_regs(Z_SP, frame::z_abi_160_size, true, false);
5492 
5493   z_lg(Z_ARG2, addr.plus_disp(frame_size));
5494   load_const_optimized(Z_ARG1, (address)msg);
5495   load_const_optimized(Z_R1, entry_addr);
5496   z_lg(Z_R1, 0, Z_R1);
5497   call_c(Z_R1);
5498 
5499   restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, false);
5500   pop_frame();
5501   restore_return_pc();
5502 
5503   BLOCK_COMMENT("} verify_oop_addr ");
5504 }
5505 
5506 const char* MacroAssembler::stop_types[] = {
5507   "stop",
5508   "untested",
5509   "unimplemented",
5510   "shouldnotreachhere"
5511 };
5512 
5513 static void stop_on_request(const char* tp, const char* msg) {
5514   tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
5515   guarantee(false, "Z assembly code requires stop: %s", msg);
5516 }
5517 
5518 void MacroAssembler::stop(int type, const char* msg, int id) {
5519   BLOCK_COMMENT(err_msg("stop: %s {", msg));
5520 
5521   // Setup arguments.
5522   load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
5523   load_const(Z_ARG2, (void*) msg);
5524   get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
5525   save_return_pc();  // Saves return pc Z_R14.
5526   push_frame_abi160(0);
5527   call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
5528   // The plain disassembler does not recognize illtrap; it displays a 32-bit
5529   // data value instead. Issuing two illtraps ensures that the disassembler finds
5530   // the proper beginning of the next instruction.
5531   z_illtrap(id); // Illegal instruction.
5532   z_illtrap(id); // Illegal instruction.
5533 
5534   BLOCK_COMMENT(" } stop");
5535 }
5536 
5537 // Special version of stop() for code size reduction.
5538 // Reuses the previously generated call sequence, if any.
5539 // Generates the call sequence on its own, if necessary.
5540 // Note: This code will work only in non-relocatable code!
5541 //       The relative address of the data elements (arg1, arg2) must not change.
5542 //       The reentry point must not move relative to its users. This prerequisite
5543 //       holds for "hand-written" code, provided all chain calls are in the same code blob.
5544 //       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
5545 address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
5546   BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==nullptr?"init":"cont", allow_relocation?"reloc ":"static", msg));
5547 
5548   // Setup arguments.
5549   if (allow_relocation) {
5550     // Relocatable version (for comparison purposes). Remove after some time.
5551     load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
5552     load_const(Z_ARG2, (void*) msg);
5553   } else {
5554     load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
5555     load_absolute_address(Z_ARG2, (address)msg);
5556   }
5557   if ((reentry != nullptr) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
5558     BLOCK_COMMENT("branch to reentry point:");
5559     z_brc(bcondAlways, reentry);
5560   } else {
5561     BLOCK_COMMENT("reentry point:");
5562     reentry = pc();      // Re-entry point for subsequent stop calls.
5563     save_return_pc();    // Saves return pc Z_R14.
5564     push_frame_abi160(0);
5565     if (allow_relocation) {
5566       reentry = nullptr;    // Prevent reentry if code relocation is allowed.
5567       call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
5568     } else {
5569       call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
5570     }
5571     z_illtrap(id); // Illegal instruction as emergency stop, should the above call return.
5572   }
5573   BLOCK_COMMENT(" } stop_chain");
5574 
5575   return reentry;
5576 }
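     // A usage sketch (hypothetical), for non-relocatable hand-written stub code: each call site
     // sits on the failing path of a preceding conditional branch; the first one emits the full
     // call sequence, subsequent ones merely branch to the returned reentry point.
     //
     //   address reentry = nullptr;
     //   reentry = __ stop_chain(reentry, /* type */ 0, "first check failed",  0x01, false);
     //   ...
     //   reentry = __ stop_chain(reentry, /* type */ 0, "second check failed", 0x02, false);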
5577 
5578 // Special version of stop() for code size reduction.
5579 // Assumes constant relative addresses for data and runtime call.
5580 void MacroAssembler::stop_static(int type, const char* msg, int id) {
5581   stop_chain(nullptr, type, msg, id, false);
5582 }
5583 
5584 void MacroAssembler::stop_subroutine() {
5585   unimplemented("stop_subroutine", 710);
5586 }
5587 
5588 // Prints msg to stdout from within generated code.
5589 void MacroAssembler::warn(const char* msg) {
5590   RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
5591   load_absolute_address(Z_R1, (address) warning);
5592   load_absolute_address(Z_ARG1, (address) msg);
5593   (void) call(Z_R1);
5594   RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
5595 }
5596 
5597 #ifndef PRODUCT
5598 
5599 // Write pattern 0x0101010101010101 in region [low-before, high+after].
5600 void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
5601   if (!ZapEmptyStackFields) return;
5602   BLOCK_COMMENT("zap memory region {");
5603   load_const_optimized(val, 0x0101010101010101);
5604   int size = before + after;
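       // For a small region around a single pointer (low == high, 1..4 words), emit unrolled
       // stores; otherwise zap the whole range [low - before, high + after] with a loop.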
5605   if (low == high && size < 5 && size > 0) {
5606     int offset = -before*BytesPerWord;
5607     for (int i = 0; i < size; ++i) {
5608       z_stg(val, Address(low, offset));
5609       offset +=(1*BytesPerWord);
5610     }
5611   } else {
5612     add2reg(addr, -before*BytesPerWord, low);
5613     if (after) {
5614 #ifdef ASSERT
5615       jlong check = after * BytesPerWord;
5616       assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
5617 #endif
5618       add2reg(high, after * BytesPerWord);
5619     }
5620     NearLabel loop;
5621     bind(loop);
5622     z_stg(val, Address(addr));
5623     add2reg(addr, 8);
5624     compare64_and_branch(addr, high, bcondNotHigh, loop);
5625     if (after) {
5626       add2reg(high, -after * BytesPerWord);
5627     }
5628   }
5629   BLOCK_COMMENT("} zap memory region");
5630 }
5631 #endif // !PRODUCT
5632 
5633 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
5634   _masm = masm;
5635   _masm->load_absolute_address(_rscratch, (address)flag_addr);
5636   _masm->load_and_test_int(_rscratch, Address(_rscratch));
5637   if (value) {
5638     _masm->z_brne(_label); // Skip if true, i.e. != 0.
5639   } else {
5640     _masm->z_bre(_label);  // Skip if false, i.e. == 0.
5641   }
5642 }
5643 
5644 SkipIfEqual::~SkipIfEqual() {
5645   _masm->bind(_label);
5646 }
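     // A usage sketch (hypothetical): guard rarely needed probe code with a runtime flag. The
     // code inside the scope is skipped whenever *flag_addr == value at run time, i.e. in this
     // example it executes only while DTraceMethodProbes is true.
     //
     //   { SkipIfEqual skip(_masm, &DTraceMethodProbes, false, Z_R1_scratch);
     //     // ... emit dtrace probe code here ...
     //   }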
5647 
5648 // Implements lightweight-locking.
5649 // Branches to slow upon failure to lock the object.
5650 // Falls through upon success.
5651 //
5652 //  - obj: the object to be locked, contents preserved.
5653 //  - hdr: the header, already loaded from obj, contents destroyed.
5654   //  Note: make sure Z_R1 is not manipulated here when the C2 compiler is in play.
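     //
     //  Fast path in pseudocode (illustrative sketch, not the exact instruction sequence):
     //    if (lock_stack_top > LockStack::end_offset() - 1) goto slow_case; // no room to push
     //    expected = obj->mark() | unlocked_value;   // the mark we expect: unlocked
     //    locked   = expected & ~unlocked_value;     // the mark we install: locked
     //    if (!CAS(&obj->mark, expected, locked)) goto slow_case;
     //    lock_stack[top] = obj; top += oopSize;     // push obj; CC is EQ on success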
5655 void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register temp, Label& slow_case) {
5656 
5657   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
5658   assert_different_registers(obj, hdr, temp);
5659 
5660   // First we need to check if the lock-stack has room for pushing the object reference.
5661   z_lgf(temp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
5662 
5663   compareU32_and_branch(temp, (unsigned)LockStack::end_offset()-1, bcondHigh, slow_case);
5664 
5665   // Attempt the lightweight lock:
5666   // load (object->mark() | 1), i.e. the mark with the unlocked bit set, into hdr.
5667   z_oill(hdr, markWord::unlocked_value);
5668 
5669   z_lgr(temp, hdr);
5670 
5671   // Clear the unlocked bit in temp: this is the locked mark value.
5672   z_xilf(temp, markWord::unlocked_value);
5673 
5674   z_csg(hdr, temp, oopDesc::mark_offset_in_bytes(), obj);
5675   branch_optimized(Assembler::bcondNotEqual, slow_case);
5676 
5677   // After successful lock, push object on lock-stack
5678   z_lgf(temp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
5679   z_stg(obj, Address(Z_thread, temp));
5680   z_ahi(temp, oopSize);
5681   z_st(temp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
5682 
5683   // as locking was successful, set CC to EQ
5684   z_cr(temp, temp);
5685 }
5686 
5687 // Implements lightweight-unlocking.
5688 // Branches to slow upon failure.
5689 // Falls through upon success.
5690 //
5691 // - obj: the object to be unlocked
5692 // - hdr: the (pre-loaded) header of the object, will be destroyed
5693 // - Z_R1_scratch: will be killed in case of Interpreter & C1 Compiler
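     //
     // Fast path in pseudocode (illustrative sketch, not the exact instruction sequence):
     //   expected = obj->mark();                     // pre-loaded in hdr, currently locked
     //   unlocked = expected | unlocked_value;       // the mark we install: unlocked
     //   if (!CAS(&obj->mark, expected, unlocked)) goto slow;
     //   lock_stack_top -= oopSize;                  // pop obj; CC is EQ on success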
5694 void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow) {
5695 
5696   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
5697   assert_different_registers(obj, hdr, tmp);
5698 
5699 #ifdef ASSERT
5700   {
5701     // Check that hdr is lightweight-locked.
5702     Label hdr_ok;
5703     z_lgr(tmp, hdr);
5704     z_nill(tmp, markWord::lock_mask_in_place);
5705     z_bre(hdr_ok);
5706     stop("Header is not lightweight-locked");
5707     bind(hdr_ok);
5708   }
5709   {
5710     // The following checks rely on the fact that LockStack is only ever modified by
5711     // its owning thread, even if the lock got inflated concurrently; removal of LockStack
5712     // entries after inflation will happen delayed in that case.
5713 
5714     // Check for lock-stack underflow.
5715     Label stack_ok;
5716     z_lgf(tmp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
5717     compareU32_and_branch(tmp, (unsigned)LockStack::start_offset(), Assembler::bcondHigh, stack_ok);
5718     stop("Lock-stack underflow");
5719     bind(stack_ok);
5720   }
5721   {
5722     // Check if the top of the lock-stack matches the unlocked object.
5723     Label tos_ok;
5724     z_aghi(tmp, -oopSize);
5725     z_lg(tmp, Address(Z_thread, tmp));
5726     compare64_and_branch(tmp, obj, Assembler::bcondEqual, tos_ok);
5727     stop("Top of lock-stack does not match the unlocked object");
5728     bind(tos_ok);
5729   }
5730 #endif // ASSERT
5731 
5732   z_lgr(tmp, hdr);
5733   z_oill(tmp, markWord::unlocked_value);
5734   z_csg(hdr, tmp, oopDesc::mark_offset_in_bytes(), obj);
5735   branch_optimized(Assembler::bcondNotEqual, slow);
5736 
5737   // After successful unlock, pop object from lock-stack
5738 #ifdef ASSERT
5739   z_lgf(tmp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
5740   z_aghi(tmp, -oopSize);
5741   z_agr(tmp, Z_thread);
5742   z_xc(0, oopSize-1, tmp, 0, tmp);  // wipe out lock-stack entry
5743 #endif
5744   z_alsi(in_bytes(JavaThread::lock_stack_top_offset()), Z_thread, -oopSize);  // pop object
5745   z_cr(tmp, tmp); // set CC to EQ
5746 }