1 /*
   2  * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2016, 2022 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/codeBuffer.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/barrierSetAssembler.hpp"
  32 #include "gc/shared/collectedHeap.inline.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "gc/shared/cardTableBarrierSet.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.hpp"
  37 #include "oops/accessDecorators.hpp"
  38 #include "oops/compressedOops.inline.hpp"
  39 #include "oops/klass.inline.hpp"
  40 #include "prims/methodHandles.hpp"
  41 #include "registerSaver_s390.hpp"
  42 #include "runtime/icache.hpp"
  43 #include "runtime/interfaceSupport.inline.hpp"
  44 #include "runtime/objectMonitor.hpp"
  45 #include "runtime/os.hpp"
  46 #include "runtime/safepoint.hpp"
  47 #include "runtime/safepointMechanism.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "runtime/stubRoutines.hpp"
  50 #include "utilities/events.hpp"
  51 #include "utilities/macros.hpp"
  52 #include "utilities/powerOfTwo.hpp"
  53 
  54 #include <ucontext.h>
  55 
  56 #define BLOCK_COMMENT(str) block_comment(str)
  57 #define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")
  58 
  59 // Move 32-bit register if destination and source are different.
  60 void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  61   if (rs != rd) { z_lr(rd, rs); }
  62 }
  63 
  64 // Move register if destination and source are different.
  65 void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  66   if (rs != rd) { z_lgr(rd, rs); }
  67 }
  68 
  69 // Zero-extend 32-bit register into 64-bit register if destination and source are different.
  70 void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  71   if (rs != rd) { z_llgfr(rd, rs); }
  72 }
  73 
  74 // Move float register if destination and source are different.
  75 void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  76   if (rs != rd) { z_ldr(rd, rs); }
  77 }
  78 
  79 // Move integer register if destination and source are different.
  80 // It is assumed that shorter-than-int types are already
  81 // appropriately sign-extended.
  82 void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
  83                                         BasicType src_type) {
  84   assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  85   assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");
  86 
  87   if (dst_type == src_type) {
  88     lgr_if_needed(dst, src); // Just move all 64 bits.
  89     return;
  90   }
  91 
  92   switch (dst_type) {
  93     // Do not support these types for now.
  94     //  case T_BOOLEAN:
  95     case T_BYTE:  // signed byte
  96       switch (src_type) {
  97         case T_INT:
  98           z_lgbr(dst, src);
  99           break;
 100         default:
 101           ShouldNotReachHere();
 102       }
 103       return;
 104 
 105     case T_CHAR:
 106     case T_SHORT:
 107       switch (src_type) {
 108         case T_INT:
 109           if (dst_type == T_CHAR) {
 110             z_llghr(dst, src);
 111           } else {
 112             z_lghr(dst, src);
 113           }
 114           break;
 115         default:
 116           ShouldNotReachHere();
 117       }
 118       return;
 119 
 120     case T_INT:
 121       switch (src_type) {
 122         case T_BOOLEAN:
 123         case T_BYTE:
 124         case T_CHAR:
 125         case T_SHORT:
 126         case T_INT:
 127         case T_LONG:
 128         case T_OBJECT:
 129         case T_ARRAY:
 130         case T_VOID:
 131         case T_ADDRESS:
 132           lr_if_needed(dst, src);
 133           // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
 134           return;
 135 
 136         default:
 137           assert(false, "non-integer src type");
 138           return;
 139       }
 140     case T_LONG:
 141       switch (src_type) {
 142         case T_BOOLEAN:
 143         case T_BYTE:
 144         case T_CHAR:
 145         case T_SHORT:
 146         case T_INT:
 147           z_lgfr(dst, src); // sign extension
 148           return;
 149 
 150         case T_LONG:
 151         case T_OBJECT:
 152         case T_ARRAY:
 153         case T_VOID:
 154         case T_ADDRESS:
 155           lgr_if_needed(dst, src);
 156           return;
 157 
 158         default:
 159           assert(false, "non-integer src type");
 160           return;
 161       }
 162       return;
 163     case T_OBJECT:
 164     case T_ARRAY:
 165     case T_VOID:
 166     case T_ADDRESS:
 167       switch (src_type) {
 168         // These types don't make sense to be converted to pointers:
 169         //      case T_BOOLEAN:
 170         //      case T_BYTE:
 171         //      case T_CHAR:
 172         //      case T_SHORT:
 173 
 174         case T_INT:
 175           z_llgfr(dst, src); // zero extension
 176           return;
 177 
 178         case T_LONG:
 179         case T_OBJECT:
 180         case T_ARRAY:
 181         case T_VOID:
 182         case T_ADDRESS:
 183           lgr_if_needed(dst, src);
 184           return;
 185 
 186         default:
 187           assert(false, "non-integer src type");
 188           return;
 189       }
 190       return;
 191     default:
 192       assert(false, "non-integer dst type");
 193       return;
 194   }
 195 }
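
     // For illustration, a few example invocations and the code they produce:
     //   move_reg_if_needed(Z_R3, T_LONG,    Z_R2, T_INT) emits z_lgfr(Z_R3, Z_R2)  (sign extension),
     //   move_reg_if_needed(Z_R3, T_ADDRESS, Z_R2, T_INT) emits z_llgfr(Z_R3, Z_R2) (zero extension),
     //   move_reg_if_needed(Z_R3, T_INT,     Z_R3, T_INT) emits nothing at all.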
 196 
 197 // Move float register if destination and source are different.
 198 void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
 199                                          FloatRegister src, BasicType src_type) {
 200   assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
 201   assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
 202   if (dst_type == src_type) {
 203     ldr_if_needed(dst, src); // Just move all 64 bits.
 204   } else {
 205     switch (dst_type) {
 206       case T_FLOAT:
 207         assert(src_type == T_DOUBLE, "invalid float type combination");
 208         z_ledbr(dst, src);
 209         return;
 210       case T_DOUBLE:
 211         assert(src_type == T_FLOAT, "invalid float type combination");
 212         z_ldebr(dst, src);
 213         return;
 214       default:
 215         assert(false, "non-float dst type");
 216         return;
 217     }
 218   }
 219 }
 220 
 221 // Optimized emitter for reg to mem operations.
 222 // Uses modern instructions if running on modern hardware, classic instructions
 223 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 224 // Data register (reg) cannot be used as work register.
 225 //
 226 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 227 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 228 void MacroAssembler::freg2mem_opt(FloatRegister reg,
 229                                   int64_t       disp,
 230                                   Register      index,
 231                                   Register      base,
 232                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 233                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 234                                   Register      scratch) {
 235   index = (index == noreg) ? Z_R0 : index;
 236   if (Displacement::is_shortDisp(disp)) {
 237     (this->*classic)(reg, disp, index, base);
 238   } else {
 239     if (Displacement::is_validDisp(disp)) {
 240       (this->*modern)(reg, disp, index, base);
 241     } else {
 242       if (scratch != Z_R0 && scratch != Z_R1) {
 243         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 244       } else {
 245         if (scratch != Z_R0) {   // scratch == Z_R1
 246           if ((scratch == index) || (index == base)) {
 247             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 248           } else {
 249             add2reg(scratch, disp, base);
 250             (this->*classic)(reg, 0, index, scratch);
 251             if (base == scratch) {
 252               add2reg(base, -disp);  // Restore base.
 253             }
 254           }
 255         } else {   // scratch == Z_R0
 256           z_lgr(scratch, base);
 257           add2reg(base, disp);
 258           (this->*classic)(reg, 0, index, base);
 259           z_lgr(base, scratch);      // Restore base.
 260         }
 261       }
 262     }
 263   }
 264 }
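
     // For illustration: freg2mem_opt(Z_F0, 8, noreg, Z_R2, MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std))
     // emits the short classic form z_std(Z_F0, 8, Z_R0, Z_R2). A displacement that only fits in
     // 20 bits signed selects the modern z_stdy instead; anything beyond that range is handled by
     // first materializing base+disp in the scratch register.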
 265 
 266 void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
 267   if (is_double) {
 268     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
 269   } else {
 270     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
 271   }
 272 }
 273 
 274 // Optimized emitter for mem to reg operations.
 275 // Uses modern instructions if running on modern hardware, classic instructions
 276 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 277 // Data register (reg) cannot be used as work register.
 278 //
 279 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 280 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 281 void MacroAssembler::mem2freg_opt(FloatRegister reg,
 282                                   int64_t       disp,
 283                                   Register      index,
 284                                   Register      base,
 285                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 286                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 287                                   Register      scratch) {
 288   index = (index == noreg) ? Z_R0 : index;
 289   if (Displacement::is_shortDisp(disp)) {
 290     (this->*classic)(reg, disp, index, base);
 291   } else {
 292     if (Displacement::is_validDisp(disp)) {
 293       (this->*modern)(reg, disp, index, base);
 294     } else {
 295       if (scratch != Z_R0 && scratch != Z_R1) {
 296         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 297       } else {
 298         if (scratch != Z_R0) {   // scratch == Z_R1
 299           if ((scratch == index) || (index == base)) {
 300             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 301           } else {
 302             add2reg(scratch, disp, base);
 303             (this->*classic)(reg, 0, index, scratch);
 304             if (base == scratch) {
 305               add2reg(base, -disp);  // Restore base.
 306             }
 307           }
 308         } else {   // scratch == Z_R0
 309           z_lgr(scratch, base);
 310           add2reg(base, disp);
 311           (this->*classic)(reg, 0, index, base);
 312           z_lgr(base, scratch);      // Restore base.
 313         }
 314       }
 315     }
 316   }
 317 }
 318 
 319 void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
 320   if (is_double) {
 321     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
 322   } else {
 323     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
 324   }
 325 }
 326 
 327 // Optimized emitter for reg to mem operations.
 328 // Uses modern instructions if running on modern hardware, classic instructions
 329 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 330 // Data register (reg) cannot be used as work register.
 331 //
 332 // Don't rely on register locking, instead pass a scratch register
 333 // (Z_R0 by default)
 334 // CAUTION! passing registers >= Z_R2 may produce bad results on old CPUs!
 335 void MacroAssembler::reg2mem_opt(Register reg,
 336                                  int64_t  disp,
 337                                  Register index,
 338                                  Register base,
 339                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 340                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
 341                                  Register scratch) {
 342   index = (index == noreg) ? Z_R0 : index;
 343   if (Displacement::is_shortDisp(disp)) {
 344     (this->*classic)(reg, disp, index, base);
 345   } else {
 346     if (Displacement::is_validDisp(disp)) {
 347       (this->*modern)(reg, disp, index, base);
 348     } else {
 349       if (scratch != Z_R0 && scratch != Z_R1) {
 350         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 351       } else {
 352         if (scratch != Z_R0) {   // scratch == Z_R1
 353           if ((scratch == index) || (index == base)) {
 354             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 355           } else {
 356             add2reg(scratch, disp, base);
 357             (this->*classic)(reg, 0, index, scratch);
 358             if (base == scratch) {
 359               add2reg(base, -disp);  // Restore base.
 360             }
 361           }
 362         } else {   // scratch == Z_R0
 363           if ((scratch == reg) || (scratch == base) || (reg == base)) {
 364             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 365           } else {
 366             z_lgr(scratch, base);
 367             add2reg(base, disp);
 368             (this->*classic)(reg, 0, index, base);
 369             z_lgr(base, scratch);    // Restore base.
 370           }
 371         }
 372       }
 373     }
 374   }
 375 }
 376 
 377 int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
 378   int store_offset = offset();
 379   if (is_double) {
 380     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
 381   } else {
 382     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
 383   }
 384   return store_offset;
 385 }
 386 
 387 // Optimized emitter for mem to reg operations.
 388 // Uses modern instructions if running on modern hardware, classic instructions
 389 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 390 // Data register (reg) will be used as work register where possible.
 391 void MacroAssembler::mem2reg_opt(Register reg,
 392                                  int64_t  disp,
 393                                  Register index,
 394                                  Register base,
 395                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 396                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
 397   index = (index == noreg) ? Z_R0 : index;
 398   if (Displacement::is_shortDisp(disp)) {
 399     (this->*classic)(reg, disp, index, base);
 400   } else {
 401     if (Displacement::is_validDisp(disp)) {
 402       (this->*modern)(reg, disp, index, base);
 403     } else {
 404       if ((reg == index) && (reg == base)) {
 405         z_sllg(reg, reg, 1);
 406         add2reg(reg, disp);
 407         (this->*classic)(reg, 0, noreg, reg);
 408       } else if ((reg == index) && (reg != Z_R0)) {
 409         add2reg(reg, disp);
 410         (this->*classic)(reg, 0, reg, base);
 411       } else if (reg == base) {
 412         add2reg(reg, disp);
 413         (this->*classic)(reg, 0, index, reg);
 414       } else if (reg != Z_R0) {
 415         add2reg(reg, disp, base);
 416         (this->*classic)(reg, 0, index, reg);
 417       } else { // reg == Z_R0 && reg != base here
 418         add2reg(base, disp);
 419         (this->*classic)(reg, 0, index, base);
 420         add2reg(base, -disp);
 421       }
 422     }
 423   }
 424 }
 425 
 426 void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
 427   if (is_double) {
 428     z_lg(reg, a);
 429   } else {
 430     mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
 431   }
 432 }
 433 
 434 void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
 435   mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
 436 }
 437 
 438 void MacroAssembler::and_imm(Register r, long mask,
 439                              Register tmp /* = Z_R0 */,
 440                              bool wide    /* = false */) {
 441   assert(wide || Immediate::is_simm32(mask), "mask value too large");
 442 
 443   if (!wide) {
 444     z_nilf(r, mask);
 445     return;
 446   }
 447 
 448   assert(r != tmp, " need a different temporary register !");
 449   load_const_optimized(tmp, mask);
 450   z_ngr(r, tmp);
 451 }
 452 
 453 // Calculate the 1's complement.
 454 // Note: The condition code is neither preserved nor correctly set by this code!!!
 455 // Note: (wide == false) does not protect the high order half of the target register
 456 //       from alteration. It only serves as optimization hint for 32-bit results.
 457 void MacroAssembler::not_(Register r1, Register r2, bool wide) {
 458 
 459   if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
 460     z_xilf(r1, -1);
 461     if (wide) {
 462       z_xihf(r1, -1);
 463     }
 464   } else { // Distinct src and dst registers.
 465     load_const_optimized(r1, -1);
 466     z_xgr(r1, r2);
 467   }
 468 }
 469 
 470 unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
 471   assert(lBitPos >=  0,      "zero is  leftmost bit position");
 472   assert(rBitPos <= 63,      "63   is rightmost bit position");
 473   assert(lBitPos <= rBitPos, "inverted selection interval");
 474   return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
 475 }
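
     // Worked examples (bit 0 is the leftmost, i.e. most significant, bit; bit 63 the rightmost):
     //   create_mask( 8, 15) == 0x00ff000000000000UL
     //   create_mask(48, 63) == 0x000000000000ffffUL
     //   create_mask( 0, 63) == 0xffffffffffffffffUL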
 476 
 477 // Helper function for the "Rotate_then_<logicalOP>" emitters.
 478 // Rotate src, then mask register contents such that only bits in range survive.
 479 // For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
 480 // For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
 481 // The caller must ensure that the selected range only contains bits with defined value.
 482 void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
 483                                       int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
 484   assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
 485   bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
 486   bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
 487   //  Pre-determine which parts of dst will be zero after shift/rotate.
 488   bool llZero  =  sll4rll && (nRotate >= 16);
 489   bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
 490   bool lfZero  = llZero && lhZero;
 491   bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
 492   bool hhZero  =                                 (srl4rll && (nRotate <= -16));
 493   bool hfZero  = hlZero && hhZero;
 494 
 495   // rotate then mask src operand.
 496   // if oneBits == true,  all bits outside selected range are 1s.
 497   // if oneBits == false, all bits outside selected range are 0s.
 498   if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
 499     if (dst32bit) {
 500       z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
 501     } else {
 502       if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 503       else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 504       else              { z_rllg(dst, src,  nRotate); }
 505     }
 506   } else {
 507     if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 508     else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 509     else              { z_rllg(dst, src,  nRotate); }
 510   }
 511 
 512   unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
 513   unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
 514   unsigned int   range_mask_l  = (unsigned int)range_mask;
 515   unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
 516   unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
 517   unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
 518   unsigned short range_mask_ll = (unsigned short)range_mask;
 519   // Works for z9 and newer H/W.
 520   if (oneBits) {
 521     if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
 522     if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
 523   } else {
 524     // All bits outside range become 0s
 525     if (((~range_mask_l) != 0) &&              !lfZero) {
 526       z_nilf(dst, range_mask_l);
 527     }
 528     if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
 529       z_nihf(dst, range_mask_h);
 530     }
 531   }
 532 }
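
     // For illustration: rotate_then_mask(dst, src, 32, 63, 0, false, false, false)
     // (no rotation, keep bits 32..63, zero everything else) emits
     //   z_sllg(dst, src, 0);   // plain copy, shift amount is 0
     //   z_nihf(dst, 0);        // clear the upper 32 bits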
 533 
 534 // Rotate src, then insert selected range from rotated src into dst.
 535 // Clear dst before, if requested.
 536 void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
 537                                         int nRotate, bool clear_dst) {
 538   // This version does not depend on src being zero-extended int2long.
 539   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 540   z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
 541 }
 542 
 543 // Rotate src, then and selected range from rotated src into dst.
 544 // Set condition code only if so requested. Otherwise it is unpredictable.
 545 // See performance note in macroAssembler_s390.hpp for important information.
 546 void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
 547                                      int nRotate, bool test_only) {
 548   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 549   // This version does not depend on src being zero-extended int2long.
 550   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 551   z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
 552 }
 553 
 554 // Rotate src, then or selected range from rotated src into dst.
 555 // Set condition code only if so requested. Otherwise it is unpredictable.
 556 // See performance note in macroAssembler_s390.hpp for important information.
 557 void MacroAssembler::rotate_then_or(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 558                                     int nRotate, bool test_only) {
 559   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 560   // This version does not depend on src being zero-extended int2long.
 561   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 562   z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
 563 }
 564 
 565 // Rotate src, then xor selected range from rotated src into dst.
 566 // Set condition code only if so requested. Otherwise it is unpredictable.
 567 // See performance note in macroAssembler_s390.hpp for important information.
 568 void MacroAssembler::rotate_then_xor(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 569                                      int nRotate, bool test_only) {
 570   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 571     // This version does not depend on src being zero-extended int2long.
 572   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 573   z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
 574 }
 575 
 576 void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
 577   if (inc.is_register()) {
 578     z_agr(r1, inc.as_register());
 579   } else { // constant
 580     intptr_t imm = inc.as_constant();
 581     add2reg(r1, imm);
 582   }
 583 }
 584 // Helper function to multiply the 64bit contents of a register by a 16bit constant.
 585 // The optimization tries to avoid the mghi instruction, since it uses the FPU for
 586 // calculation and is thus rather slow.
 587 //
 588 // There is no handling for special cases, e.g. cval==0 or cval==1.
 589 //
 590 // Returns len of generated code block.
 591 unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
 592   int block_start = offset();
 593 
 594   bool sign_flip = cval < 0;
 595   cval = sign_flip ? -cval : cval;
 596 
 597   BLOCK_COMMENT("Reg64*Con16 {");
 598 
 599   int bit1 = cval & -cval;
 600   if (bit1 == cval) {
 601     z_sllg(rval, rval, exact_log2(bit1));
 602     if (sign_flip) { z_lcgr(rval, rval); }
 603   } else {
 604     int bit2 = (cval-bit1) & -(cval-bit1);
 605     if ((bit1+bit2) == cval) {
 606       z_sllg(work, rval, exact_log2(bit1));
 607       z_sllg(rval, rval, exact_log2(bit2));
 608       z_agr(rval, work);
 609       if (sign_flip) { z_lcgr(rval, rval); }
 610     } else {
 611       if (sign_flip) { z_mghi(rval, -cval); }
 612       else           { z_mghi(rval,  cval); }
 613     }
 614   }
 615   BLOCK_COMMENT("} Reg64*Con16");
 616 
 617   int block_end = offset();
 618   return block_end - block_start;
 619 }
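
     // Worked example: for cval == 10 the constant decomposes into bit1 == 2 and bit2 == 8
     // (bit1 + bit2 == cval), so the emitted sequence is
     //   z_sllg(work, rval, 1);   // work = rval * 2
     //   z_sllg(rval, rval, 3);   // rval = rval * 8
     //   z_agr(rval, work);       // rval = rval * 10
     // A power of two such as cval == 16 degenerates to a single z_sllg(rval, rval, 4).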
 620 
 621 // Generic operation r1 := r2 + imm.
 622 //
 623 // Should produce the best code for each supported CPU version.
 624 // r2 == noreg yields r1 := r1 + imm
 625 // imm == 0 emits either no instruction or r1 := r2 !
 626 // NOTES: 1) Don't use this function where fixed sized
 627 //           instruction sequences are required!!!
 628 //        2) Don't use this function if condition code
 629 //           setting is required!
 630 //        3) Despite being declared as int64_t, the parameter imm
 631 //           must be a simm_32 value (= signed 32-bit integer).
 632 void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
 633   assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");
 634 
 635   if (r2 == noreg) { r2 = r1; }
 636 
 637   // Handle special case imm == 0.
 638   if (imm == 0) {
 639     lgr_if_needed(r1, r2);
 640     // Nothing else to do.
 641     return;
 642   }
 643 
 644   if (!PreferLAoverADD || (r2 == Z_R0)) {
 645     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 646 
 647     // Can we encode imm in 16 bits signed?
 648     if (Immediate::is_simm16(imm)) {
 649       if (r1 == r2) {
 650         z_aghi(r1, imm);
 651         return;
 652       }
 653       if (distinctOpnds) {
 654         z_aghik(r1, r2, imm);
 655         return;
 656       }
 657       z_lgr(r1, r2);
 658       z_aghi(r1, imm);
 659       return;
 660     }
 661   } else {
 662     // Can we encode imm in 12 bits unsigned?
 663     if (Displacement::is_shortDisp(imm)) {
 664       z_la(r1, imm, r2);
 665       return;
 666     }
 667     // Can we encode imm in 20 bits signed?
 668     if (Displacement::is_validDisp(imm)) {
 669       // Always use LAY instruction, so we don't need the tmp register.
 670       z_lay(r1, imm, r2);
 671       return;
 672     }
 673 
 674   }
 675 
 676   // Can handle it (all possible values) with long immediates.
 677   lgr_if_needed(r1, r2);
 678   z_agfi(r1, imm);
 679 }
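
     // For illustration (assuming PreferLAoverADD is off and the distinct-operands facility is available):
     //   add2reg(Z_R3, 0,      Z_R2) emits z_lgr(Z_R3, Z_R2) (or nothing if Z_R3 == Z_R2),
     //   add2reg(Z_R3, 8)            emits z_aghi(Z_R3, 8),
     //   add2reg(Z_R3, 8,      Z_R2) emits z_aghik(Z_R3, Z_R2, 8),
     //   add2reg(Z_R3, 100000, Z_R2) emits z_lgr(Z_R3, Z_R2); z_agfi(Z_R3, 100000).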
 680 
 681 // Generic operation r := b + x + d
 682 //
 683 // Addition of several operands with address generation semantics - sort of:
 684 //  - no restriction on the registers. Any register will do for any operand.
 685 //  - x == noreg: operand will be disregarded.
 686 //  - b == noreg: will use (contents of) result reg as operand (r := r + d).
 687 //  - x == Z_R0:  just disregard
 688 //  - b == Z_R0:  use as operand. This is not address generation semantics!!!
 689 //
 690 // The same restrictions as on add2reg() are valid!!!
 691 void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
 692   assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");
 693 
 694   if (x == noreg) { x = Z_R0; }
 695   if (b == noreg) { b = r; }
 696 
 697   // Handle special case x == R0.
 698   if (x == Z_R0) {
 699     // Can simply add the immediate value to the base register.
 700     add2reg(r, d, b);
 701     return;
 702   }
 703 
 704   if (!PreferLAoverADD || (b == Z_R0)) {
 705     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 706     // Handle special case d == 0.
 707     if (d == 0) {
 708       if (b == x)        { z_sllg(r, b, 1); return; }
 709       if (r == x)        { z_agr(r, b);     return; }
 710       if (r == b)        { z_agr(r, x);     return; }
 711       if (distinctOpnds) { z_agrk(r, x, b); return; }
 712       z_lgr(r, b);
 713       z_agr(r, x);
 714     } else {
 715       if (x == b)             { z_sllg(r, x, 1); }
 716       else if (r == x)        { z_agr(r, b); }
 717       else if (r == b)        { z_agr(r, x); }
 718       else if (distinctOpnds) { z_agrk(r, x, b); }
 719       else {
 720         z_lgr(r, b);
 721         z_agr(r, x);
 722       }
 723       add2reg(r, d);
 724     }
 725   } else {
 726     // Can we encode imm in 12 bits unsigned?
 727     if (Displacement::is_shortDisp(d)) {
 728       z_la(r, d, x, b);
 729       return;
 730     }
 731     // Can we encode imm in 20 bits signed?
 732     if (Displacement::is_validDisp(d)) {
 733       z_lay(r, d, x, b);
 734       return;
 735     }
 736     z_la(r, 0, x, b);
 737     add2reg(r, d);
 738   }
 739 }
 740 
 741 // Generic emitter (32bit) for direct memory increment.
 742 // For optimal code, do not specify Z_R0 as temp register.
 743 void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
 744   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 745     z_asi(a, imm);
 746   } else {
 747     z_lgf(tmp, a);
 748     add2reg(tmp, imm);
 749     z_st(tmp, a);
 750   }
 751 }
 752 
 753 void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
 754   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 755     z_agsi(a, imm);
 756   } else {
 757     z_lg(tmp, a);
 758     add2reg(tmp, imm);
 759     z_stg(tmp, a);
 760   }
 761 }
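
     // For illustration: add2mem_64(Address(Z_R2, 16), 1, Z_R1) emits a single z_agsi on that
     // address when VM_Version::has_MemWithImmALUOps() reports the facility; otherwise it falls
     // back to the load/add/store sequence z_lg(Z_R1, a); add2reg(Z_R1, 1); z_stg(Z_R1, a).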
 762 
 763 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
 764   switch (size_in_bytes) {
 765     case  8: z_lg(dst, src); break;
 766     case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
 767     case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
 768     case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
 769     default: ShouldNotReachHere();
 770   }
 771 }
 772 
 773 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
 774   switch (size_in_bytes) {
 775     case  8: z_stg(src, dst); break;
 776     case  4: z_st(src, dst); break;
 777     case  2: z_sth(src, dst); break;
 778     case  1: z_stc(src, dst); break;
 779     default: ShouldNotReachHere();
 780   }
 781 }
 782 
 783 // Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
 784 // a high-order summand in register tmp.
 785 //
 786 // return value: <  0: No split required, si20 actually has property uimm12.
 787 //               >= 0: Split performed. Use return value as uimm12 displacement and
 788 //                     tmp as index register.
 789 int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
 790   assert(Immediate::is_simm20(si20_offset), "sanity");
 791   int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
 792   int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
 793   assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
 794          !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
 795   assert((lg_off+ll_off) == si20_offset, "offset splitup error");
 796 
 797   Register work = accumulate? Z_R0 : tmp;
 798 
 799   if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
 800     z_lghi(work, ll_off>>12);   // Implicit sign extension.
 801     z_slag(work, work, 12);
 802   } else {                      // Len of code = 0..10.
 803     if (ll_off == 0) { return -1; }
 804     // ll_off has 8 significant bits (at most) plus sign.
 805     if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
 806       z_llilh(work, ll_off >> 16);
 807       if (ll_off < 0) {                  // Sign-extension required.
 808         z_lgfr(work, work);
 809       }
 810     } else {
 811       if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
 812         z_llill(work, ll_off);
 813       } else {                           // Non-zero bits in both halfbytes.
 814         z_lghi(work, ll_off>>12);        // Implicit sign extension.
 815         z_slag(work, work, 12);
 816       }
 817     }
 818   }
 819   if (accumulate) { z_algr(tmp, work); } // len of code += 4
 820   return lg_off;
 821 }
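
     // Worked example: si20_offset == 0x12345 splits into ll_off == 0x12000 and lg_off == 0x345.
     // With fixed_codelen the high part is built as z_lghi(work, 0x12); z_slag(work, work, 12);
     // the caller then uses the returned 0x345 as uimm12 displacement and work as index register.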
 822 
 823 void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 824   if (Displacement::is_validDisp(si20)) {
 825     z_ley(t, si20, a);
 826   } else {
 827     // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
 828     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 829     // pool loads).
 830     bool accumulate    = true;
 831     bool fixed_codelen = true;
 832     Register work;
 833 
 834     if (fixed_codelen) {
 835       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 836     } else {
 837       accumulate = (a == tmp);
 838     }
 839     work = tmp;
 840 
 841     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 842     if (disp12 < 0) {
 843       z_le(t, si20, work);
 844     } else {
 845       if (accumulate) {
 846         z_le(t, disp12, work);
 847       } else {
 848         z_le(t, disp12, work, a);
 849       }
 850     }
 851   }
 852 }
 853 
 854 void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 855   if (Displacement::is_validDisp(si20)) {
 856     z_ldy(t, si20, a);
 857   } else {
 858     // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
 859     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 860     // pool loads).
 861     bool accumulate    = true;
 862     bool fixed_codelen = true;
 863     Register work;
 864 
 865     if (fixed_codelen) {
 866       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 867     } else {
 868       accumulate = (a == tmp);
 869     }
 870     work = tmp;
 871 
 872     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 873     if (disp12 < 0) {
 874       z_ld(t, si20, work);
 875     } else {
 876       if (accumulate) {
 877         z_ld(t, disp12, work);
 878       } else {
 879         z_ld(t, disp12, work, a);
 880       }
 881     }
 882   }
 883 }
 884 
 885 // PCrelative TOC access.
 886 // Returns distance (in bytes) from current position to start of consts section.
 887 // Returns 0 (zero) if no consts section exists or if it has size zero.
 888 long MacroAssembler::toc_distance() {
 889   CodeSection* cs = code()->consts();
 890   return (long)((cs != NULL) ? cs->start()-pc() : 0);
 891 }
 892 
 893 // Implementation on x86/sparc assumes that constant and instruction section are
 894 // adjacent, but this doesn't hold. Two special situations may occur, that we must
 895 // be able to handle:
 896 //   1. const section may be located apart from the inst section.
 897 //   2. const section may be empty
 898 // In both cases, we use the const section's start address to compute the "TOC";
 899 // this seems to occur only temporarily, and in the final step we always seem to end up
 900 // with the pc-relative variant.
 901 //
 902 // PC-relative offset could be +/-2**32 -> use long for disp
 903 // Furthermore: makes no sense to have special code for
 904 // adjacent const and inst sections.
 905 void MacroAssembler::load_toc(Register Rtoc) {
 906   // Simply use distance from start of const section (should be patched in the end).
 907   long disp = toc_distance();
 908 
 909   RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
 910   relocate(rspec);
 911   z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
 912 }
 913 
 914 // PCrelative TOC access.
 915 // Load from anywhere pcrelative (with relocation of load instr)
 916 void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
 917   address          pc             = this->pc();
 918   ptrdiff_t        total_distance = dataLocation - pc;
 919   RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);
 920 
 921   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 922   assert(total_distance != 0, "sanity");
 923 
 924   // Some extra safety net.
 925   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 926     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 927   }
 928 
 929   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 930   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 931 }
 932 
 933 
 934 // PCrelative TOC access.
 935 // Load from anywhere pcrelative (with relocation of load instr)
 936 // loaded addr has to be relocated when added to constant pool.
 937 void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
 938   address          pc             = this->pc();
 939   ptrdiff_t        total_distance = addrLocation - pc;
 940   RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);
 941 
 942   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 943 
 944   // Some extra safety net.
 945   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 946     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_addr_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 947   }
 948 
 949   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 950   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 951 }
 952 
 953 // Generic operation: load a value from memory and test.
 954 // CondCode indicates the sign (<0, ==0, >0) of the loaded value.
 955 void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
 956   z_lb(dst, a);
 957   z_ltr(dst, dst);
 958 }
 959 
 960 void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
 961   int64_t disp = a.disp20();
 962   if (Displacement::is_shortDisp(disp)) {
 963     z_lh(dst, a);
 964   } else if (Displacement::is_longDisp(disp)) {
 965     z_lhy(dst, a);
 966   } else {
 967     guarantee(false, "displacement out of range");
 968   }
 969   z_ltr(dst, dst);
 970 }
 971 
 972 void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
 973   z_lt(dst, a);
 974 }
 975 
 976 void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
 977   z_ltgf(dst, a);
 978 }
 979 
 980 void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
 981   z_ltg(dst, a);
 982 }
 983 
 984 // Test a bit in memory.
 985 void MacroAssembler::testbit(const Address &a, unsigned int bit) {
 986   assert(a.index() == noreg, "no index reg allowed in testbit");
 987   if (bit <= 7) {
 988     z_tm(a.disp() + 3, a.base(), 1 << bit);
 989   } else if (bit <= 15) {
 990     z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
 991   } else if (bit <= 23) {
 992     z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
 993   } else if (bit <= 31) {
 994     z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
 995   } else {
 996     ShouldNotReachHere();
 997   }
 998 }
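
     // For illustration: bit 0 is the least significant bit of the 32-bit value and, given the
     // big-endian byte order of z/Architecture, lives in the byte at disp+3. Hence
     // testbit(Address(Z_R2, 8), 9) emits z_tm(10, Z_R2, 0x02).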
 999 
1000 // Test a bit in a register. Result is reflected in CC.
1001 void MacroAssembler::testbit(Register r, unsigned int bitPos) {
1002   if (bitPos < 16) {
1003     z_tmll(r, 1U<<bitPos);
1004   } else if (bitPos < 32) {
1005     z_tmlh(r, 1U<<(bitPos-16));
1006   } else if (bitPos < 48) {
1007     z_tmhl(r, 1U<<(bitPos-32));
1008   } else if (bitPos < 64) {
1009     z_tmhh(r, 1U<<(bitPos-48));
1010   } else {
1011     ShouldNotReachHere();
1012   }
1013 }
1014 
1015 void MacroAssembler::prefetch_read(Address a) {
1016   z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
1017 }
1018 void MacroAssembler::prefetch_update(Address a) {
1019   z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
1020 }
1021 
1022 // Clear a register, i.e. load const zero into reg.
1023 // Return len (in bytes) of generated instruction(s).
1024 // whole_reg: Clear 64 bits if true, 32 bits otherwise.
1025 // set_cc:    Use instruction that sets the condition code, if true.
1026 int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
1027   unsigned int start_off = offset();
1028   if (whole_reg) {
1029     set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
1030   } else {  // Only 32bit register.
1031     set_cc ? z_xr(r, r) : z_lhi(r, 0);
1032   }
1033   return offset() - start_off;
1034 }
1035 
1036 #ifdef ASSERT
1037 int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
1038   switch (pattern_len) {
1039     case 1:
1040       pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
1041     case 2:
1042       pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
1043     case 4:
1044       pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
1045     case 8:
1046       return load_const_optimized_rtn_len(r, pattern, true);
1047       break;
1048     default:
1049       guarantee(false, "preset_reg: bad len");
1050   }
1051   return 0;
1052 }
1053 #endif
1054 
1055 // addr: Address descriptor of memory to clear. The index register will not be used!
1056 // size: Number of bytes to clear.
1057 //    !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!!
1058 //    !!! Use store_const() instead                  !!!
1059 void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
1060   guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");
1061 
1062   if (size == 1) {
1063     z_mvi(addr, 0);
1064     return;
1065   }
1066 
1067   switch (size) {
1068     case 2: z_mvhhi(addr, 0);
1069       return;
1070     case 4: z_mvhi(addr, 0);
1071       return;
1072     case 8: z_mvghi(addr, 0);
1073       return;
1074     default: ; // Fallthru to xc.
1075   }
1076 
1077   z_xc(addr, size, addr);
1078 }
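
     // For illustration: clear_mem(Address(Z_R2, 0), 8) emits a single z_mvghi storing zero,
     // while clear_mem(Address(Z_R2, 0), 16) falls through to z_xc(addr, 16, addr), which clears
     // the 16 bytes by XORing the field with itself (byte-wise, hence not atomic).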
1079 
1080 void MacroAssembler::align(int modulus) {
1081   while (offset() % modulus != 0) z_nop();
1082 }
1083 
1084 // Special version for non-relocatable code if required alignment
1085 // is larger than CodeEntryAlignment.
1086 void MacroAssembler::align_address(int modulus) {
1087   while ((uintptr_t)pc() % modulus != 0) z_nop();
1088 }
1089 
1090 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1091                                          Register temp_reg,
1092                                          int64_t extra_slot_offset) {
1093   // On Z, we can have index and disp in an Address. So don't call argument_offset,
1094   // which issues an unnecessary add instruction.
1095   int stackElementSize = Interpreter::stackElementSize;
1096   int64_t offset = extra_slot_offset * stackElementSize;
1097   const Register argbase = Z_esp;
1098   if (arg_slot.is_constant()) {
1099     offset += arg_slot.as_constant() * stackElementSize;
1100     return Address(argbase, offset);
1101   }
1102   // else
1103   assert(temp_reg != noreg, "must specify");
1104   assert(temp_reg != Z_ARG1, "base and index are conflicting");
1105   z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
1106   return Address(argbase, temp_reg, offset);
1107 }
1108 
1109 
1110 //===================================================================
1111 //===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
1112 //===================================================================
1113 //===           P A T C H A B L E   C O N S T A N T S             ===
1114 //===================================================================
1115 
1116 
1117 //---------------------------------------------------
1118 //  Load (patchable) constant into register
1119 //---------------------------------------------------
1120 
1121 
1122 // Load absolute address (and try to optimize).
1123 //   Note: This method is usable only for position-fixed code,
1124 //         referring to a position-fixed target location.
1125 //         If not so, relocations and patching must be used.
1126 void MacroAssembler::load_absolute_address(Register d, address addr) {
1127   assert(addr != NULL, "should not happen");
1128   BLOCK_COMMENT("load_absolute_address:");
1129   if (addr == NULL) {
1130     z_larl(d, pc()); // Dummy emit for size calc.
1131     return;
1132   }
1133 
1134   if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
1135     z_larl(d, addr);
1136     return;
1137   }
1138 
1139   load_const_optimized(d, (long)addr);
1140 }
1141 
1142 // Load a 64bit constant.
1143 // Patchable code sequence, but not atomically patchable.
1144 // Make sure to keep code size constant -> no value-dependent optimizations.
1145 // Do not kill condition code.
1146 void MacroAssembler::load_const(Register t, long x) {
1147   // Note: Right shift is only cleanly defined for unsigned types
1148   //       or for signed types with nonnegative values.
1149   Assembler::z_iihf(t, (long)((unsigned long)x >> 32));
1150   Assembler::z_iilf(t, (long)((unsigned long)x & 0xffffffffUL));
1151 }
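
     // Worked example: load_const(Z_R1, 0x123456789abcdef0L) emits
     //   z_iihf(Z_R1, 0x12345678);   // insert high word
     //   z_iilf(Z_R1, 0x9abcdef0);   // insert low word
     // Both instructions are 6 bytes long, so the sequence always occupies 12 bytes and can be
     // patched in place (see patch_const() below).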
1152 
1153 // Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
1154 // Patchable code sequence, but not atomically patchable.
1155 // Make sure to keep code size constant -> no value-dependent optimizations.
1156 // Do not kill condition code.
1157 void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
1158   if (sign_extend) { Assembler::z_lgfi(t, x); }
1159   else             { Assembler::z_llilf(t, x); }
1160 }
1161 
1162 // Load narrow oop constant, no decompression.
1163 void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
1164   assert(UseCompressedOops, "must be on to call this method");
1165   load_const_32to64(t, CompressedOops::narrow_oop_value(a), false /*sign_extend*/);
1166 }
1167 
1168 // Load narrow klass constant, compression required.
1169 void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
1170   assert(UseCompressedClassPointers, "must be on to call this method");
1171   narrowKlass encoded_k = CompressedKlassPointers::encode(k);
1172   load_const_32to64(t, encoded_k, false /*sign_extend*/);
1173 }
1174 
1175 //------------------------------------------------------
1176 //  Compare (patchable) constant with register.
1177 //------------------------------------------------------
1178 
1179 // Compare narrow oop in reg with narrow oop constant, no decompression.
1180 void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
1181   assert(UseCompressedOops, "must be on to call this method");
1182 
1183   Assembler::z_clfi(oop1, CompressedOops::narrow_oop_value(oop2));
1184 }
1185 
1186 // Compare narrow klass in reg with narrow klass constant, no decompression.
1187 void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
1188   assert(UseCompressedClassPointers, "must be on to call this method");
1189   narrowKlass encoded_k = CompressedKlassPointers::encode(klass2);
1190 
1191   Assembler::z_clfi(klass1, encoded_k);
1192 }
1193 
1194 //----------------------------------------------------------
1195 //  Check which kind of load_constant we have here.
1196 //----------------------------------------------------------
1197 
1198 // Detection of CPU version dependent load_const sequence.
1199 // The detection is valid only for code sequences generated by load_const,
1200 // not load_const_optimized.
1201 bool MacroAssembler::is_load_const(address a) {
1202   unsigned long inst1, inst2;
1203   unsigned int  len1,  len2;
1204 
1205   len1 = get_instruction(a, &inst1);
1206   len2 = get_instruction(a + len1, &inst2);
1207 
1208   return is_z_iihf(inst1) && is_z_iilf(inst2);
1209 }
1210 
1211 // Detection of CPU version dependent load_const_32to64 sequence.
1212 // Mostly used for narrow oops and narrow Klass pointers.
1213 // The detection is valid only for code sequences generated by load_const_32to64.
1214 bool MacroAssembler::is_load_const_32to64(address pos) {
1215   unsigned long inst1, inst2;
1216   unsigned int len1;
1217 
1218   len1 = get_instruction(pos, &inst1);
1219   return is_z_llilf(inst1);
1220 }
1221 
1222 // Detection of compare_immediate_narrow sequence.
1223 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1224 bool MacroAssembler::is_compare_immediate32(address pos) {
1225   return is_equal(pos, CLFI_ZOPC, RIL_MASK);
1226 }
1227 
1228 // Detection of compare_immediate_narrow sequence.
1229 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1230 bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
1231   return is_compare_immediate32(pos);
1232 }
1233 
1234 // Detection of compare_immediate_narrow sequence.
1235 // The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
1236 bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
1237   return is_compare_immediate32(pos);
1238 }
1239 
1240 //-----------------------------------
1241 //  patch the load_constant
1242 //-----------------------------------
1243 
1244 // CPU-version dependent patching of load_const.
1245 void MacroAssembler::patch_const(address a, long x) {
1246   assert(is_load_const(a), "not a load of a constant");
1247   // Note: Right shift is only cleanly defined for unsigned types
1248   //       or for signed types with nonnegative values.
1249   set_imm32((address)a, (long)((unsigned long)x >> 32));
1250   set_imm32((address)(a + 6), (long)((unsigned long)x & 0xffffffffUL));
1251 }
1252 
1253 // Patching the value of CPU version dependent load_const_32to64 sequence.
1254 // The passed ptr MUST be in compressed format!
1255 int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
1256   assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");
1257 
1258   set_imm32(pos, np);
1259   return 6;
1260 }
1261 
1262 // Patching the value of CPU version dependent compare_immediate_narrow sequence.
1263 // The passed ptr MUST be in compressed format!
1264 int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
1265   assert(is_compare_immediate32(pos), "not a compressed ptr compare");
1266 
1267   set_imm32(pos, np);
1268   return 6;
1269 }
1270 
1271 // Patching the immediate value of CPU version dependent load_narrow_oop sequence.
1272 // The passed ptr must NOT be in compressed format!
1273 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
1274   assert(UseCompressedOops, "Can only patch compressed oops");
1275   return patch_load_const_32to64(pos, CompressedOops::narrow_oop_value(o));
1276 }
1277 
1278 // Patching the immediate value of CPU version dependent load_narrow_klass sequence.
1279 // The passed ptr must NOT be in compressed format!
1280 int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
1281   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1282 
1283   narrowKlass nk = CompressedKlassPointers::encode(k);
1284   return patch_load_const_32to64(pos, nk);
1285 }
1286 
1287 // Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
1288 // The passed ptr must NOT be in compressed format!
1289 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
1290   assert(UseCompressedOops, "Can only patch compressed oops");
1291   return patch_compare_immediate_32(pos, CompressedOops::narrow_oop_value(o));
1292 }
1293 
1294 // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
1295 // The passed ptr must NOT be in compressed format!
1296 int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
1297   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1298 
1299   narrowKlass nk = CompressedKlassPointers::encode(k);
1300   return patch_compare_immediate_32(pos, nk);
1301 }
1302 
1303 //------------------------------------------------------------------------
1304 //  Extract the constant from a load_constant instruction stream.
1305 //------------------------------------------------------------------------
1306 
1307 // Get constant from a load_const sequence.
1308 long MacroAssembler::get_const(address a) {
1309   assert(is_load_const(a), "not a load of a constant");
1310   unsigned long x;
1311   x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
1312   x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
1313   return (long) x;
1314 }
1315 
1316 //--------------------------------------
1317 //  Store a constant in memory.
1318 //--------------------------------------
1319 
1320 // General emitter to move a constant to memory.
1321 // The store is atomic.
1322 //  o Address must be given in RS format (no index register)
1323 //  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
1324 //  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
1325 //  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
1326 //  o Memory slot must be at least as wide as constant, will assert otherwise.
1327 //  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
1328 int MacroAssembler::store_const(const Address &dest, long imm,
1329                                 unsigned int lm, unsigned int lc,
1330                                 Register scratch) {
1331   int64_t  disp = dest.disp();
1332   Register base = dest.base();
1333   assert(!dest.has_index(), "not supported");
1334   assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
1335   assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
1336   assert(lm>=lc, "memory slot too small");
1337   assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
1338   assert(Displacement::is_validDisp(disp), "displacement out of range");
1339 
1340   bool is_shortDisp = Displacement::is_shortDisp(disp);
1341   int store_offset = -1;
1342 
1343   // For target len == 1 it's easy.
1344   if (lm == 1) {
1345     store_offset = offset();
1346     if (is_shortDisp) {
1347       z_mvi(disp, base, imm);
1348       return store_offset;
1349     } else {
1350       z_mviy(disp, base, imm);
1351       return store_offset;
1352     }
1353   }
1354 
1355   // All the "good stuff" takes an unsigned displacement.
1356   if (is_shortDisp) {
1357     // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.
1358 
1359     store_offset = offset();
1360     switch (lm) {
1361       case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
1362         z_mvhhi(disp, base, imm);
1363         return store_offset;
1364       case 4:
1365         if (Immediate::is_simm16(imm)) {
1366           z_mvhi(disp, base, imm);
1367           return store_offset;
1368         }
1369         break;
1370       case 8:
1371         if (Immediate::is_simm16(imm)) {
1372           z_mvghi(disp, base, imm);
1373           return store_offset;
1374         }
1375         break;
1376       default:
1377         ShouldNotReachHere();
1378         break;
1379     }
1380   }
1381 
1382   //  Can't optimize, so load value and store it.
1383   guarantee(scratch != noreg, " need a scratch register here !");
1384   if (imm != 0) {
1385     load_const_optimized(scratch, imm);  // Preserves CC anyway.
1386   } else {
1387     // Leave CC alone!!
1388     (void) clear_reg(scratch, true, false); // Indicate unused result.
1389   }
1390 
1391   store_offset = offset();
1392   if (is_shortDisp) {
1393     switch (lm) {
1394       case 2:
1395         z_sth(scratch, disp, Z_R0, base);
1396         return store_offset;
1397       case 4:
1398         z_st(scratch, disp, Z_R0, base);
1399         return store_offset;
1400       case 8:
1401         z_stg(scratch, disp, Z_R0, base);
1402         return store_offset;
1403       default:
1404         ShouldNotReachHere();
1405         break;
1406     }
1407   } else {
1408     switch (lm) {
1409       case 2:
1410         z_sthy(scratch, disp, Z_R0, base);
1411         return store_offset;
1412       case 4:
1413         z_sty(scratch, disp, Z_R0, base);
1414         return store_offset;
1415       case 8:
1416         z_stg(scratch, disp, Z_R0, base);
1417         return store_offset;
1418       default:
1419         ShouldNotReachHere();
1420         break;
1421     }
1422   }
1423   return -1; // should not reach here
1424 }
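// Added usage sketch (hypothetical operands): storing the 8-byte constant 0 at a small
// unsigned displacement,
//   store_const(Address(Z_SP, 16), 0, 8, 8, Z_R1);
// takes the short-displacement path above and emits a single MVGHI. A constant outside
// the simm16 range instead falls through to load_const_optimized() plus STG, using the
// scratch register.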
1425 
1426 //===================================================================
1427 //===      N O T   P A T C H A B L E   C O N S T A N T S          ===
1428 //===================================================================
1429 
1430 // Load constant x into register t with a fast instruction sequence
1431 // depending on the bits in x. Preserves CC under all circumstances.
1432 int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
1433   if (x == 0) {
1434     int len;
1435     if (emit) {
1436       len = clear_reg(t, true, false);
1437     } else {
1438       len = 4;
1439     }
1440     return len;
1441   }
1442 
1443   if (Immediate::is_simm16(x)) {
1444     if (emit) { z_lghi(t, x); }
1445     return 4;
1446   }
1447 
1448   // 64 bit value: | part1 | part2 | part3 | part4 |
1449   // At least one part is not zero!
1450   // Note: Right shift is only cleanly defined for unsigned types
1451   //       or for signed types with nonnegative values.
1452   int part1 = (int)((unsigned long)x >> 48) & 0x0000ffff;
1453   int part2 = (int)((unsigned long)x >> 32) & 0x0000ffff;
1454   int part3 = (int)((unsigned long)x >> 16) & 0x0000ffff;
1455   int part4 = (int)x & 0x0000ffff;
1456   int part12 = (int)((unsigned long)x >> 32);
1457   int part34 = (int)x;
1458 
1459   // Lower word only (unsigned).
1460   if (part12 == 0) {
1461     if (part3 == 0) {
1462       if (emit) z_llill(t, part4);
1463       return 4;
1464     }
1465     if (part4 == 0) {
1466       if (emit) z_llilh(t, part3);
1467       return 4;
1468     }
1469     if (emit) z_llilf(t, part34);
1470     return 6;
1471   }
1472 
1473   // Upper word only.
1474   if (part34 == 0) {
1475     if (part1 == 0) {
1476       if (emit) z_llihl(t, part2);
1477       return 4;
1478     }
1479     if (part2 == 0) {
1480       if (emit) z_llihh(t, part1);
1481       return 4;
1482     }
1483     if (emit) z_llihf(t, part12);
1484     return 6;
1485   }
1486 
1487   // Lower word only (signed).
1488   if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
1489     if (emit) z_lgfi(t, part34);
1490     return 6;
1491   }
1492 
1493   int len = 0;
1494 
1495   if ((part1 == 0) || (part2 == 0)) {
1496     if (part1 == 0) {
1497       if (emit) z_llihl(t, part2);
1498       len += 4;
1499     } else {
1500       if (emit) z_llihh(t, part1);
1501       len += 4;
1502     }
1503   } else {
1504     if (emit) z_llihf(t, part12);
1505     len += 6;
1506   }
1507 
1508   if ((part3 == 0) || (part4 == 0)) {
1509     if (part3 == 0) {
1510       if (emit) z_iill(t, part4);
1511       len += 4;
1512     } else {
1513       if (emit) z_iilh(t, part3);
1514       len += 4;
1515     }
1516   } else {
1517     if (emit) z_iilf(t, part34);
1518     len += 6;
1519   }
1520   return len;
1521 }
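// Added worked example (illustrative only): for x = 0x0001000000000fff the halfword parts
// are part1 = 0x0001, part2 = 0, part3 = 0, part4 = 0x0fff. Neither 32-bit word is zero and
// the sign-extension shortcut does not apply, so the emitter combines LLIHH(t, 0x0001) with
// IILL(t, 0x0fff), for a total encoding length of 8 bytes.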
1522 
1523 //=====================================================================
1524 //===     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1525 //=====================================================================
1526 
1527 // Note: In the worst case, one of the scratch registers is destroyed!!!
1528 void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1529   // Right operand is constant.
1530   if (x2.is_constant()) {
1531     jlong value = x2.as_constant();
1532     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
1533     return;
1534   }
1535 
1536   // Right operand is in register.
1537   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
1538 }
1539 
1540 // Note: In the worst case, one of the scratch registers is destroyed!!!
1541 void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1542   // Right operand is constant.
1543   if (x2.is_constant()) {
1544     jlong value = x2.as_constant();
1545     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
1546     return;
1547   }
1548 
1549   // Right operand is in register.
1550   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
1551 }
1552 
1553 // Note: In the worst case, one of the scratch registers is destroyed!!!
1554 void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1555   // Right operand is constant.
1556   if (x2.is_constant()) {
1557     jlong value = x2.as_constant();
1558     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
1559     return;
1560   }
1561 
1562   // Right operand is in register.
1563   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
1564 }
1565 
1566 void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1567   // Right operand is constant.
1568   if (x2.is_constant()) {
1569     jlong value = x2.as_constant();
1570     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
1571     return;
1572   }
1573 
1574   // Right operand is in register.
1575   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
1576 }
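// Added usage sketch (hypothetical label and register): branch to `done` if the unsigned
// 64-bit value in Z_R2 is zero:
//   Label done;
//   compareU64_and_branch(Z_R2, RegisterOrConstant((intptr_t)0), bcondEqual, done);
//   ...
//   bind(done);
// With a constant right operand, the call funnels into the jlong variant of
// compare_and_branch_optimized() below.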
1577 
1578 // Generate an optimal branch to the branch target.
1579 // Optimal means that a relative branch (brc or brcl) is used if the
1580 // branch distance is short enough. Loading the target address into a
1581 // register and branching via reg is used as fallback only.
1582 //
1583 // Used registers:
1584 //   Z_R1 - work reg. Holds branch target address.
1585 //          Used in fallback case only.
1586 //
1587 // This version of branch_optimized is good for cases where the target address is known
1588 // and constant, i.e. is never changed (no relocation, no patching).
1589 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
1590   address branch_origin = pc();
1591 
1592   if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1593     z_brc(cond, branch_addr);
1594   } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
1595     z_brcl(cond, branch_addr);
1596   } else {
1597     load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
1598     z_bcr(cond, Z_R1);
1599   }
1600 }
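// Added note: BRC encodes a signed 16-bit halfword offset (roughly +/-64 KB from the branch
// origin), BRCL a signed 32-bit halfword offset (roughly +/-4 GB). Only targets beyond that
// range need the load_const_optimized() + BCR fallback above.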
1601 
1602 // This version of branch_optimized is good for cases where the target address
1603 // is potentially not yet known at the time the code is emitted.
1604 //
1605 // One very common case is a branch to an unbound label which is handled here.
1606 // The caller might know (or hope) that the branch distance is short enough
1607 // to be encoded in a 16bit relative address. In that case, the caller passes a
1608 // NearLabel branch_target.
1609 // Care must be taken with unbound labels. Each call to target(label) creates
1610 // an entry in the patch queue for that label to patch all references of the label
1611 // once it gets bound. Those recorded patch locations must be patchable. Otherwise,
1612 // an assertion fires at patch time.
1613 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
1614   if (branch_target.is_bound()) {
1615     address branch_addr = target(branch_target);
1616     branch_optimized(cond, branch_addr);
1617   } else if (branch_target.is_near()) {
1618     z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
1619   } else {
1620     z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
1621   }
1622 }
1623 
1624 // Generate an optimal compare and branch to the branch target.
1625 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1626 // branch distance is short enough. Loading the target address into a
1627 // register and branching via reg is used as fallback only.
1628 //
1629 // Input:
1630 //   r1 - left compare operand
1631 //   r2 - right compare operand
1632 void MacroAssembler::compare_and_branch_optimized(Register r1,
1633                                                   Register r2,
1634                                                   Assembler::branch_condition cond,
1635                                                   address  branch_addr,
1636                                                   bool     len64,
1637                                                   bool     has_sign) {
1638   unsigned int casenum = (len64?2:0)+(has_sign?0:1);
1639 
1640   address branch_origin = pc();
1641   if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1642     switch (casenum) {
1643       case 0: z_crj( r1, r2, cond, branch_addr); break;
1644       case 1: z_clrj (r1, r2, cond, branch_addr); break;
1645       case 2: z_cgrj(r1, r2, cond, branch_addr); break;
1646       case 3: z_clgrj(r1, r2, cond, branch_addr); break;
1647       default: ShouldNotReachHere(); break;
1648     }
1649   } else {
1650     switch (casenum) {
1651       case 0: z_cr( r1, r2); break;
1652       case 1: z_clr(r1, r2); break;
1653       case 2: z_cgr(r1, r2); break;
1654       case 3: z_clgr(r1, r2); break;
1655       default: ShouldNotReachHere(); break;
1656     }
1657     branch_optimized(cond, branch_addr);
1658   }
1659 }
1660 
1661 // Generate an optimal compare and branch to the branch target.
1662 // Optimal means that a relative branch (clgij, brc or brcl) is used if the
1663 // branch distance is short enough. Loading the target address into a
1664 // register and branching via reg is used as fallback only.
1665 //
1666 // Input:
1667 //   r1 - left compare operand (in register)
1668 //   x2 - right compare operand (immediate)
1669 void MacroAssembler::compare_and_branch_optimized(Register r1,
1670                                                   jlong    x2,
1671                                                   Assembler::branch_condition cond,
1672                                                   Label&   branch_target,
1673                                                   bool     len64,
1674                                                   bool     has_sign) {
1675   address      branch_origin = pc();
1676   bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
1677   bool         is_RelAddr16  = branch_target.is_near() ||
1678                                (branch_target.is_bound() &&
1679                                 RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
1680   unsigned int casenum       = (len64?2:0)+(has_sign?0:1);
1681 
1682   if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
1683     switch (casenum) {
1684       case 0: z_cij( r1, x2, cond, branch_target); break;
1685       case 1: z_clij(r1, x2, cond, branch_target); break;
1686       case 2: z_cgij(r1, x2, cond, branch_target); break;
1687       case 3: z_clgij(r1, x2, cond, branch_target); break;
1688       default: ShouldNotReachHere(); break;
1689     }
1690     return;
1691   }
1692 
1693   if (x2 == 0) {
1694     switch (casenum) {
1695       case 0: z_ltr(r1, r1); break;
1696       case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1697       case 2: z_ltgr(r1, r1); break;
1698       case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1699       default: ShouldNotReachHere(); break;
1700     }
1701   } else {
1702     if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
1703       switch (casenum) {
1704         case 0: z_chi(r1, x2); break;
1705         case 1: z_chi(r1, x2); break; // positive immediate < 2**15
1706         case 2: z_cghi(r1, x2); break;
1707         case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
1708         default: break;
1709       }
1710     } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
1711       switch (casenum) {
1712         case 0: z_cfi( r1, x2); break;
1713         case 1: z_clfi(r1, x2); break;
1714         case 2: z_cgfi(r1, x2); break;
1715         case 3: z_clgfi(r1, x2); break;
1716         default: ShouldNotReachHere(); break;
1717       }
1718     } else {
1719       // No instruction with immediate operand possible, so load into register.
1720       Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
1721       load_const_optimized(scratch, x2);
1722       switch (casenum) {
1723         case 0: z_cr( r1, scratch); break;
1724         case 1: z_clr(r1, scratch); break;
1725         case 2: z_cgr(r1, scratch); break;
1726         case 3: z_clgr(r1, scratch); break;
1727         default: ShouldNotReachHere(); break;
1728       }
1729     }
1730   }
1731   branch_optimized(cond, branch_target);
1732 }
1733 
1734 // Generate an optimal compare and branch to the branch target.
1735 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1736 // branch distance is short enough. Loading the target address into a
1737 // register and branching via reg is used as fallback only.
1738 //
1739 // Input:
1740 //   r1 - left compare operand
1741 //   r2 - right compare operand
1742 void MacroAssembler::compare_and_branch_optimized(Register r1,
1743                                                   Register r2,
1744                                                   Assembler::branch_condition cond,
1745                                                   Label&   branch_target,
1746                                                   bool     len64,
1747                                                   bool     has_sign) {
1748   unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1749 
1750   if (branch_target.is_bound()) {
1751     address branch_addr = target(branch_target);
1752     compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
1753   } else {
1754     if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
1755       switch (casenum) {
1756         case 0: z_crj(  r1, r2, cond, branch_target); break;
1757         case 1: z_clrj( r1, r2, cond, branch_target); break;
1758         case 2: z_cgrj( r1, r2, cond, branch_target); break;
1759         case 3: z_clgrj(r1, r2, cond, branch_target); break;
1760         default: ShouldNotReachHere(); break;
1761       }
1762     } else {
1763       switch (casenum) {
1764         case 0: z_cr( r1, r2); break;
1765         case 1: z_clr(r1, r2); break;
1766         case 2: z_cgr(r1, r2); break;
1767         case 3: z_clgr(r1, r2); break;
1768         default: ShouldNotReachHere(); break;
1769       }
1770       branch_optimized(cond, branch_target);
1771     }
1772   }
1773 }
1774 
1775 //===========================================================================
1776 //===   END     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1777 //===========================================================================
1778 
1779 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
1780   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1781   int index = oop_recorder()->allocate_metadata_index(obj);
1782   RelocationHolder rspec = metadata_Relocation::spec(index);
1783   return AddressLiteral((address)obj, rspec);
1784 }
1785 
1786 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
1787   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1788   int index = oop_recorder()->find_index(obj);
1789   RelocationHolder rspec = metadata_Relocation::spec(index);
1790   return AddressLiteral((address)obj, rspec);
1791 }
1792 
1793 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
1794   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1795   int oop_index = oop_recorder()->allocate_oop_index(obj);
1796   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1797 }
1798 
1799 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1800   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1801   int oop_index = oop_recorder()->find_index(obj);
1802   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1803 }
1804 
1805 // NOTE: destroys r
1806 void MacroAssembler::c2bool(Register r, Register t) {
1807   z_lcr(t, r);   // t = -r
1808   z_or(r, t);    // r = -r OR r
1809   z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
1810 }
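// Added rationale: for any nonzero 32-bit value r, at least one of r and -r has the sign bit
// set, so (r | -r) is negative; the logical shift right by 31 then leaves exactly 1.
// For r == 0 both terms are zero and the result is 0.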
1811 
1812 // Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
1813 // and return the resulting instruction.
1814 // Dest_pos and inst_pos are 32 bit only. These parms can only designate
1815 // relative positions.
1816 // Use correct argument types. Do not pre-calculate distance.
1817 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
1818   int c = 0;
1819   unsigned long patched_inst = 0;
1820   if (is_call_pcrelative_short(inst) ||
1821       is_branch_pcrelative_short(inst) ||
1822       is_branchoncount_pcrelative_short(inst) ||
1823       is_branchonindex32_pcrelative_short(inst)) {
1824     c = 1;
1825     int m = fmask(15, 0);    // simm16(-1, 16, 32);
1826     int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
1827     patched_inst = (inst & ~m) | v;
1828   } else if (is_compareandbranch_pcrelative_short(inst)) {
1829     c = 2;
1830     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1831     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1832     patched_inst = (inst & ~m) | v;
1833   } else if (is_branchonindex64_pcrelative_short(inst)) {
1834     c = 3;
1835     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1836     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1837     patched_inst = (inst & ~m) | v;
1838   } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
1839     c = 4;
1840     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1841     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1842     patched_inst = (inst & ~m) | v;
1843   } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
1844     c = 5;
1845     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1846     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1847     patched_inst = (inst & ~m) | v;
1848   } else {
1849     print_dbg_msg(tty, inst, "not a relative branch", 0);
1850     dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
1851     ShouldNotReachHere();
1852   }
1853 
1854   long new_off = get_pcrel_offset(patched_inst);
1855   if (new_off != (dest_pos-inst_pos)) {
1856     tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
1857     print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
1858     print_dbg_msg(tty, patched_inst, "<- patched  instruction: branch patching error", 0);
1859 #ifdef LUCY_DBG
1860     VM_Version::z_SIGSEGV();
1861 #endif
1862     ShouldNotReachHere();
1863   }
1864   return patched_inst;
1865 }
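// Added example (illustrative only): for a 4-byte BRC at inst_pos whose target dest_pos lies
// 0x100 bytes ahead, RelAddr::pcrel_off16() yields the halfword distance 0x80, which
// simm16(.., 16, 32) places into bits 16..31 of the instruction word (case c == 1 above).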
1866 
1867 // Only called when binding labels (share/vm/asm/assembler.cpp)
1868 // Pass arguments as intended. Do not pre-calculate distance.
1869 void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
1870   unsigned long stub_inst;
1871   int           inst_len = get_instruction(branch, &stub_inst);
1872 
1873   set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
1874 }
1875 
1876 
1877 // Extract relative address (aka offset).
1878 // inv_simm16 works for 4-byte instructions only.
1879 // Compare-and-branch instructions are 6 bytes long and have a 16bit offset "in the middle".
1880 long MacroAssembler::get_pcrel_offset(unsigned long inst) {
1881 
1882   if (MacroAssembler::is_pcrelative_short(inst)) {
1883     if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
1884       return RelAddr::inv_pcrel_off16(inv_simm16(inst));
1885     } else {
1886       return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
1887     }
1888   }
1889 
1890   if (MacroAssembler::is_pcrelative_long(inst)) {
1891     return RelAddr::inv_pcrel_off32(inv_simm32(inst));
1892   }
1893 
1894   print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
1895 #ifdef LUCY_DBG
1896   VM_Version::z_SIGSEGV();
1897 #else
1898   ShouldNotReachHere();
1899 #endif
1900   return -1;
1901 }
1902 
1903 long MacroAssembler::get_pcrel_offset(address pc) {
1904   unsigned long inst;
1905   unsigned int  len = get_instruction(pc, &inst);
1906 
1907 #ifdef ASSERT
1908   long offset;
1909   if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
1910     offset = get_pcrel_offset(inst);
1911   } else {
1912     offset = -1;
1913   }
1914 
1915   if (offset == -1) {
1916     dump_code_range(tty, pc, 32, "not a pcrelative instruction");
1917 #ifdef LUCY_DBG
1918     VM_Version::z_SIGSEGV();
1919 #else
1920     ShouldNotReachHere();
1921 #endif
1922   }
1923   return offset;
1924 #else
1925   return get_pcrel_offset(inst);
1926 #endif // ASSERT
1927 }
1928 
1929 // Get target address from pc-relative instructions.
1930 address MacroAssembler::get_target_addr_pcrel(address pc) {
1931   assert(is_pcrelative_long(pc), "not a pcrelative instruction");
1932   return pc + get_pcrel_offset(pc);
1933 }
1934 
1935 // Patch pc relative load address.
1936 void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
1937   unsigned long inst;
1938   // Offset is +/- 2**32 -> use long.
1939   ptrdiff_t distance = con - pc;
1940 
1941   get_instruction(pc, &inst);
1942 
1943   if (is_pcrelative_short(inst)) {
1944     *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc);  // Instructions are at least 2-byte aligned, no test required.
1945 
1946     // Some extra safety net.
1947     if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
1948       print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
1949       dump_code_range(tty, pc, 32, "distance out of range (16bit)");
1950       guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
1951     }
1952     return;
1953   }
1954 
1955   if (is_pcrelative_long(inst)) {
1956     *(int *)(pc+2)   = RelAddr::pcrel_off32(con, pc);
1957 
1958     // Some extra safety net.
1959     if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
1960       print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
1961       dump_code_range(tty, pc, 32, "distance out of range (32bit)");
1962       guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
1963     }
1964     return;
1965   }
1966 
1967   guarantee(false, "not a pcrelative instruction to patch!");
1968 }
1969 
1970 // "Current PC" here means the address just behind the basr instruction.
1971 address MacroAssembler::get_PC(Register result) {
1972   z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
1973   return pc();
1974 }
1975 
1976 // Get current PC + offset.
1977 // Offset given in bytes, must be even!
1978 // "Current PC" here means the address of the larl instruction plus the given offset.
1979 address MacroAssembler::get_PC(Register result, int64_t offset) {
1980   address here = pc();
1981   z_larl(result, offset/2); // Save target instruction address in result.
1982   return here + offset;
1983 }
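// Added note: LARL computes result = address_of_larl + 2 * immediate, i.e. the immediate is
// a halfword count. That is why the byte offset must be even and is divided by 2 above.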
1984 
1985 void MacroAssembler::instr_size(Register size, Register pc) {
1986   // Extract 2 most significant bits of current instruction.
1987   z_llgc(size, Address(pc));
1988   z_srl(size, 6);
1989   // Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6.
1990   z_ahi(size, 3);
1991   z_nill(size, 6);
1992 }
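// Added background: on z/Architecture the two leftmost bits of an instruction encode its
// length: 00 -> 2 bytes, 01 and 10 -> 4 bytes, 11 -> 6 bytes. The (x+3)&6 trick maps these
// two-bit codes to byte lengths, e.g. code 3 gives (3+3)&6 = 6.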
1993 
1994 // Resize_frame with SP(new) = SP(old) - [offset].
1995 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
1996 {
1997   assert_different_registers(offset, fp, Z_SP);
1998   if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
1999 
2000   z_sgr(Z_SP, offset);
2001   z_stg(fp, _z_abi(callers_sp), Z_SP);
2002 }
2003 
2004 // Resize_frame with SP(new) = [newSP] + offset.
2005 //   This emitter is useful if we already have calculated a pointer
2006 //   into the to-be-allocated stack space, e.g. with special alignment properties,
2007 //   but need some additional space, e.g. for spilling.
2008 //   newSP    is the pre-calculated pointer. It must not be modified.
2009 //   fp       holds, or is filled with, the frame pointer.
2010 //   offset   is the additional increment which is added to addr to form the new SP.
2011 //            Note: specify a negative value to reserve more space!
2012 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2013 //                    It does not guarantee that fp contains the frame pointer at the end.
2014 void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
2015   assert_different_registers(newSP, fp, Z_SP);
2016 
2017   if (load_fp) {
2018     z_lg(fp, _z_abi(callers_sp), Z_SP);
2019   }
2020 
2021   add2reg(Z_SP, offset, newSP);
2022   z_stg(fp, _z_abi(callers_sp), Z_SP);
2023 }
2024 
2025 // Resize_frame with SP(new) = [newSP].
2026 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2027 //                    It does not guarantee that fp contains the frame pointer at the end.
2028 void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
2029   assert_different_registers(newSP, fp, Z_SP);
2030 
2031   if (load_fp) {
2032     z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
2033   }
2034 
2035   z_lgr(Z_SP, newSP);
2036   if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
2037     z_stg(fp, _z_abi(callers_sp), newSP);
2038   } else {
2039     z_stg(fp, _z_abi(callers_sp), Z_SP);
2040   }
2041 }
2042 
2043 // Resize_frame with SP(new) = SP(old) + offset.
2044 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
2045   assert_different_registers(fp, Z_SP);
2046 
2047   if (load_fp) {
2048     z_lg(fp, _z_abi(callers_sp), Z_SP);
2049   }
2050   add64(Z_SP, offset);
2051   z_stg(fp, _z_abi(callers_sp), Z_SP);
2052 }
2053 
2054 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
2055 #ifdef ASSERT
2056   assert_different_registers(bytes, old_sp, Z_SP);
2057   if (!copy_sp) {
2058     z_cgr(old_sp, Z_SP);
2059     asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
2060   }
2061 #endif
2062   if (copy_sp) { z_lgr(old_sp, Z_SP); }
2063   if (bytes_with_inverted_sign) {
2064     z_agr(Z_SP, bytes);
2065   } else {
2066     z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.
2067   }
2068   z_stg(old_sp, _z_abi(callers_sp), Z_SP);
2069 }
2070 
2071 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
2072   long offset = Assembler::align(bytes, frame::alignment_in_bytes);
2073   assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
2074   assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);
2075 
2076   // We must not write outside the current stack bounds (given by Z_SP).
2077   // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
2078   // We rely on Z_R0 by default to be available as scratch.
2079   z_lgr(scratch, Z_SP);
2080   add2reg(Z_SP, -offset);
2081   z_stg(scratch, _z_abi(callers_sp), Z_SP);
2082 #ifdef ASSERT
2083   // Just make sure nobody uses the value in the default scratch register.
2084   // When another register is used, the caller might rely on it containing the frame pointer.
2085   if (scratch == Z_R0) {
2086     z_iihf(scratch, 0xbaadbabe);
2087     z_iilf(scratch, 0xdeadbeef);
2088   }
2089 #endif
2090   return offset;
2091 }
2092 
2093 // Push a frame of size `bytes' plus abi160 on top.
2094 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
2095   BLOCK_COMMENT("push_frame_abi160 {");
2096   unsigned int res = push_frame(bytes + frame::z_abi_160_size);
2097   BLOCK_COMMENT("} push_frame_abi160");
2098   return res;
2099 }
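// Added usage sketch (mirrors reserved_stack_check() further below): a C call from generated
// code is typically bracketed like this:
//   save_return_pc();
//   push_frame_abi160(0);   // ABI-compliant C frame, no extra space
//   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
//   pop_frame();
//   restore_return_pc();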
2100 
2101 // Pop current C frame.
2102 void MacroAssembler::pop_frame() {
2103   BLOCK_COMMENT("pop_frame:");
2104   Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
2105 }
2106 
2107 // Pop current C frame and restore return PC register (Z_R14).
2108 void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
2109   BLOCK_COMMENT("pop_frame_restore_retPC:");
2110   int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;
2111   // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
2112   if (Displacement::is_validDisp(retPC_offset)) {
2113     z_lg(Z_R14, retPC_offset, Z_SP);
2114     add2reg(Z_SP, frame_size_in_bytes);
2115   } else {
2116     add2reg(Z_SP, frame_size_in_bytes);
2117     restore_return_pc();
2118   }
2119 }
2120 
2121 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
2122   if (allow_relocation) {
2123     call_c(entry_point);
2124   } else {
2125     call_c_static(entry_point);
2126   }
2127 }
2128 
2129 void MacroAssembler::call_VM_leaf_base(address entry_point) {
2130   bool allow_relocation = true;
2131   call_VM_leaf_base(entry_point, allow_relocation);
2132 }
2133 
2134 void MacroAssembler::call_VM_base(Register oop_result,
2135                                   Register last_java_sp,
2136                                   address  entry_point,
2137                                   bool     allow_relocation,
2138                                   bool     check_exceptions) { // Defaults to true.
2139   // Allow_relocation indicates, if true, that the generated code shall
2140   // be fit for code relocation or referenced data relocation. In other
2141   // words: all addresses must be considered variable. PC-relative addressing
2142   // is not possible then.
2143   // On the other hand, if (allow_relocation == false), addresses and offsets
2144   // may be considered stable, enabling us to take advantage of some PC-relative
2145   // addressing tweaks. These might improve performance and reduce code size.
2146 
2147   // Determine last_java_sp register.
2148   if (!last_java_sp->is_valid()) {
2149     last_java_sp = Z_SP;  // Load Z_SP as SP.
2150   }
2151 
2152   set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);
2153 
2154   // ARG1 must hold thread address.
2155   z_lgr(Z_ARG1, Z_thread);
2156 
2157   address return_pc = NULL;
2158   if (allow_relocation) {
2159     return_pc = call_c(entry_point);
2160   } else {
2161     return_pc = call_c_static(entry_point);
2162   }
2163 
2164   reset_last_Java_frame(allow_relocation);
2165 
2166   // C++ interp handles this in the interpreter.
2167   check_and_handle_popframe(Z_thread);
2168   check_and_handle_earlyret(Z_thread);
2169 
2170   // Check for pending exceptions.
2171   if (check_exceptions) {
2172     // Check for pending exceptions (java_thread is set upon return).
2173     load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));
2174 
2175     // This used to be a conditional branch to forward_exception. However, if the code
2176     // gets relocated, that branch might no longer reach its target. So we branch around
2177     // an (always reachable) call to the forward_exception stub instead.
2178 
2179     Label ok;
2180     z_bre(ok); // Bcondequal is the same as bcondZero.
2181     call_stub(StubRoutines::forward_exception_entry());
2182     bind(ok);
2183   }
2184 
2185   // Get oop result if there is one and reset the value in the thread.
2186   if (oop_result->is_valid()) {
2187     get_vm_result(oop_result);
2188   }
2189 
2190   _last_calls_return_pc = return_pc;  // Wipe out other (error handling) calls.
2191 }
2192 
2193 void MacroAssembler::call_VM_base(Register oop_result,
2194                                   Register last_java_sp,
2195                                   address  entry_point,
2196                                   bool     check_exceptions) { // Defaults to true.
2197   bool allow_relocation = true;
2198   call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
2199 }
2200 
2201 // VM calls without explicit last_java_sp.
2202 
2203 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2204   // Call takes possible detour via InterpreterMacroAssembler.
2205   call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
2206 }
2207 
2208 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
2209   // Z_ARG1 is reserved for the thread.
2210   lgr_if_needed(Z_ARG2, arg_1);
2211   call_VM(oop_result, entry_point, check_exceptions);
2212 }
2213 
2214 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
2215   // Z_ARG1 is reserved for the thread.
2216   lgr_if_needed(Z_ARG2, arg_1);
2217   assert(arg_2 != Z_ARG2, "smashed argument");
2218   lgr_if_needed(Z_ARG3, arg_2);
2219   call_VM(oop_result, entry_point, check_exceptions);
2220 }
2221 
2222 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2223                              Register arg_3, bool check_exceptions) {
2224   // Z_ARG1 is reserved for the thread.
2225   lgr_if_needed(Z_ARG2, arg_1);
2226   assert(arg_2 != Z_ARG2, "smashed argument");
2227   lgr_if_needed(Z_ARG3, arg_2);
2228   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2229   lgr_if_needed(Z_ARG4, arg_3);
2230   call_VM(oop_result, entry_point, check_exceptions);
2231 }
2232 
2233 // VM static calls without explicit last_java_sp.
2234 
2235 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
2236   // Call takes possible detour via InterpreterMacroAssembler.
2237   call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
2238 }
2239 
2240 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2241                                     Register arg_3, bool check_exceptions) {
2242   // Z_ARG1 is reserved for the thread.
2243   lgr_if_needed(Z_ARG2, arg_1);
2244   assert(arg_2 != Z_ARG2, "smashed argument");
2245   lgr_if_needed(Z_ARG3, arg_2);
2246   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2247   lgr_if_needed(Z_ARG4, arg_3);
2248   call_VM_static(oop_result, entry_point, check_exceptions);
2249 }
2250 
2251 // VM calls with explicit last_java_sp.
2252 
2253 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
2254   // Call takes possible detour via InterpreterMacroAssembler.
2255   call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
2256 }
2257 
2258 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
2259    // Z_ARG1 is reserved for the thread.
2260    lgr_if_needed(Z_ARG2, arg_1);
2261    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2262 }
2263 
2264 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2265                              Register arg_2, bool check_exceptions) {
2266    // Z_ARG1 is reserved for the thread.
2267    lgr_if_needed(Z_ARG2, arg_1);
2268    assert(arg_2 != Z_ARG2, "smashed argument");
2269    lgr_if_needed(Z_ARG3, arg_2);
2270    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2271 }
2272 
2273 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2274                              Register arg_2, Register arg_3, bool check_exceptions) {
2275   // Z_ARG1 is reserved for the thread.
2276   lgr_if_needed(Z_ARG2, arg_1);
2277   assert(arg_2 != Z_ARG2, "smashed argument");
2278   lgr_if_needed(Z_ARG3, arg_2);
2279   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2280   lgr_if_needed(Z_ARG4, arg_3);
2281   call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2282 }
2283 
2284 // VM leaf calls.
2285 
2286 void MacroAssembler::call_VM_leaf(address entry_point) {
2287   // Call takes possible detour via InterpreterMacroAssembler.
2288   call_VM_leaf_base(entry_point, true);
2289 }
2290 
2291 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
2292   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2293   call_VM_leaf(entry_point);
2294 }
2295 
2296 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
2297   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2298   assert(arg_2 != Z_ARG1, "smashed argument");
2299   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2300   call_VM_leaf(entry_point);
2301 }
2302 
2303 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2304   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2305   assert(arg_2 != Z_ARG1, "smashed argument");
2306   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2307   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2308   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2309   call_VM_leaf(entry_point);
2310 }
2311 
2312 // Static VM leaf calls.
2313 // Really static VM leaf calls are never patched.
2314 
2315 void MacroAssembler::call_VM_leaf_static(address entry_point) {
2316   // Call takes possible detour via InterpreterMacroAssembler.
2317   call_VM_leaf_base(entry_point, false);
2318 }
2319 
2320 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
2321   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2322   call_VM_leaf_static(entry_point);
2323 }
2324 
2325 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
2326   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2327   assert(arg_2 != Z_ARG1, "smashed argument");
2328   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2329   call_VM_leaf_static(entry_point);
2330 }
2331 
2332 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2333   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2334   assert(arg_2 != Z_ARG1, "smashed argument");
2335   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2336   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2337   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2338   call_VM_leaf_static(entry_point);
2339 }
2340 
2341 // Don't use detour via call_c(reg).
2342 address MacroAssembler::call_c(address function_entry) {
2343   load_const(Z_R1, function_entry);
2344   return call(Z_R1);
2345 }
2346 
2347 // Variant for really static (non-relocatable) calls which are never patched.
2348 address MacroAssembler::call_c_static(address function_entry) {
2349   load_absolute_address(Z_R1, function_entry);
2350 #if 0 // def ASSERT
2351   // Verify that call site did not move.
2352   load_const_optimized(Z_R0, function_entry);
2353   z_cgr(Z_R1, Z_R0);
2354   z_brc(bcondEqual, 3);
2355   z_illtrap(0xba);
2356 #endif
2357   return call(Z_R1);
2358 }
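// Added note: call_c() loads the target via load_const(), a fixed-length (and therefore
// patchable) sequence, whereas call_c_static() may use a shorter pc-relative
// load_absolute_address() because the target is guaranteed never to change.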
2359 
2360 address MacroAssembler::call_c_opt(address function_entry) {
2361   bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
2362   _last_calls_return_pc = success ? pc() : NULL;
2363   return _last_calls_return_pc;
2364 }
2365 
2366 // Identify a call_far_patchable instruction: LARL + LG + BASR
2367 //
2368 //    nop                   ; optionally, if required for alignment
2369 //    lgrl rx,A(TOC entry)  ; PC-relative access into constant pool
2370 //    basr Z_R14,rx         ; end of this instruction must be aligned to a word boundary
2371 //
2372 // Code pattern will eventually get patched into variant2 (see below for detection code).
2373 //
2374 bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
2375   address iaddr = instruction_addr;
2376 
2377   // Check for the actual load instruction.
2378   if (!is_load_const_from_toc(iaddr)) { return false; }
2379   iaddr += load_const_from_toc_size();
2380 
2381   // Check for the call (BASR) instruction, finally.
2382   assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
2383   return is_call_byregister(iaddr);
2384 }
2385 
2386 // Identify a call_far_patchable instruction: BRASL
2387 //
2388 // Code pattern suited to atomic patching:
2389 //    nop                       ; Optionally, if required for alignment.
2390 //    nop    ...                ; Multiple filler nops to compensate for size difference (variant0 is longer).
2391 //    nop                       ; For code pattern detection: Prepend each BRASL with a nop.
2392 //    brasl  Z_R14,<reladdr>    ; End of code must be 4-byte aligned !
2393 bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
2394   const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());
2395 
2396   // Check for correct number of leading nops.
2397   address iaddr;
2398   for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
2399     if (!is_z_nop(iaddr)) { return false; }
2400   }
2401   assert(iaddr == call_addr, "sanity");
2402 
2403   // --> Check for call instruction.
2404   if (is_call_far_pcrelative(call_addr)) {
2405     assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
2406     return true;
2407   }
2408 
2409   return false;
2410 }
2411 
2412 // Emit a NOT mt-safely patchable 64 bit absolute call.
2413 // If toc_offset == -2, then the destination of the call (= target) is emitted
2414 //                      to the constant pool and a runtime_call relocation is added
2415 //                      to the code buffer.
2416 // If toc_offset != -2, target must already be in the constant pool at
2417 //                      _ctableStart+toc_offset (a caller can retrieve toc_offset
2418 //                      from the runtime_call relocation).
2419 // Special handling of emitting to scratch buffer when there is no constant pool.
2420 // Slightly changed code pattern. We emit an additional nop if we would
2421 // not end emitting at a word aligned address. This is to ensure
2422 // an atomically patchable displacement in brasl instructions.
2423 //
2424 // A call_far_patchable comes in different flavors:
2425 //  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
2426 //  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
2427 //  - BRASL                  (relative address of call target coded in instruction)
2428 // All flavors occupy the same amount of space. Length differences are compensated
2429 // by leading nops, such that the instruction sequence always ends at the same
2430 // byte offset. This is required to keep the return offset constant.
2431 // Furthermore, the return address (the end of the instruction sequence) is forced
2432 // to be on a 4-byte boundary. This is required for atomic patching, should we ever
2433 // need to patch the call target of the BRASL flavor.
2434 // RETURN value: false, if no constant pool entry could be allocated, true otherwise.
2435 bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
2436   // Get current pc and ensure word alignment for end of instr sequence.
2437   const address start_pc = pc();
2438   const intptr_t       start_off = offset();
2439   assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
2440   const ptrdiff_t      dist      = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
2441   const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
2442   const bool emit_relative_call  = !emit_target_to_pool &&
2443                                    RelAddr::is_in_range_of_RelAddr32(dist) &&
2444                                    ReoptimizeCallSequences &&
2445                                    !code_section()->scratch_emit();
2446 
2447   if (emit_relative_call) {
2448     // Add padding to get the same size as below.
2449     const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
2450     unsigned int current_padding;
2451     for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
2452     assert(current_padding == padding, "sanity");
2453 
2454     // relative call: len = 2(nop) + 6 (brasl)
2455     // CodeBlob resize cannot occur in this case because
2456     // this call is emitted into pre-existing space.
2457     z_nop(); // Prepend each BRASL with a nop.
2458     z_brasl(Z_R14, target);
2459   } else {
2460     // absolute call: Get address from TOC.
2461     // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
2462     if (emit_target_to_pool) {
2463       // When emitting the call for the first time, we do not need to use
2464       // the pc-relative version. It will be patched anyway, when the code
2465       // buffer is copied.
2466       // Relocation is not needed when !ReoptimizeCallSequences.
2467       relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
2468       AddressLiteral dest(target, rt);
2469       // Store_oop_in_toc() adds dest to the constant table. As a side effect, this kills
2470       // inst_mark(). Reset if possible.
2471       bool reset_mark = (inst_mark() == pc());
2472       tocOffset = store_oop_in_toc(dest);
2473       if (reset_mark) { set_inst_mark(); }
2474       if (tocOffset == -1) {
2475         return false; // Couldn't create constant pool entry.
2476       }
2477     }
2478     assert(offset() == start_off, "emit no code before this point!");
2479 
2480     address tocPos = pc() + tocOffset;
2481     if (emit_target_to_pool) {
2482       tocPos = code()->consts()->start() + tocOffset;
2483     }
2484     load_long_pcrelative(Z_R14, tocPos);
2485     z_basr(Z_R14, Z_R14);
2486   }
2487 
2488 #ifdef ASSERT
2489   // Assert that we can identify the emitted call.
2490   assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
2491   assert(offset() == start_off+call_far_patchable_size(), "wrong size");
2492 
2493   if (emit_target_to_pool) {
2494     assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
2495            "wrong encoding of dest address");
2496   }
2497 #endif
2498   return true; // success
2499 }
2500 
2501 // Identify a call_far_patchable instruction.
2502 // For more detailed information see header comment of call_far_patchable.
2503 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
2504   return is_call_far_patchable_variant2_at(instruction_addr)  || // short version: BRASL
2505          is_call_far_patchable_variant0_at(instruction_addr);    // long version LARL + LG + BASR
2506 }
2507 
2508 // Does the call_far_patchable instruction use a pc-relative encoding
2509 // of the call destination?
2510 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
2511   // Variant 2 is pc-relative.
2512   return is_call_far_patchable_variant2_at(instruction_addr);
2513 }
2514 
2515 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
2516   // Prepend each BRASL with a nop.
2517   return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size());  // Match at position after one nop required.
2518 }
2519 
2520 // Set destination address of a call_far_patchable instruction.
2521 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
2522   ResourceMark rm;
2523 
2524   // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
2525   int code_size = MacroAssembler::call_far_patchable_size();
2526   CodeBuffer buf(instruction_addr, code_size);
2527   MacroAssembler masm(&buf);
2528   masm.call_far_patchable(dest, tocOffset);
2529   ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
2530 }
2531 
2532 // Get dest address of a call_far_patchable instruction.
2533 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
2534   // Dynamic TOC: absolute address in constant pool.
2535   // Check variant2 first, it is more frequent.
2536 
2537   // Relative address encoded in call instruction.
2538   if (is_call_far_patchable_variant2_at(instruction_addr)) {
2539     return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.
2540 
2541   // Absolute address in constant pool.
2542   } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
2543     address iaddr = instruction_addr;
2544 
2545     long    tocOffset = get_load_const_from_toc_offset(iaddr);
2546     address tocLoc    = iaddr + tocOffset;
2547     return *(address *)(tocLoc);
2548   } else {
2549     fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
2550     fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
2551             *(unsigned long*)instruction_addr,
2552             *(unsigned long*)(instruction_addr+8),
2553             call_far_patchable_size());
2554     Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
2555     ShouldNotReachHere();
2556     return NULL;
2557   }
2558 }
2559 
2560 void MacroAssembler::align_call_far_patchable(address pc) {
2561   if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
2562 }
2563 
2564 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2565 }
2566 
2567 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2568 }
2569 
2570 // Read from the polling page.
2571 // Use TM or TMY instruction, depending on read offset.
2572 //   offset = 0: Use TM, safepoint polling.
2573 //   offset < 0: Use TMY, profiling safepoint polling.
2574 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
2575   if (Immediate::is_uimm12(offset)) {
2576     z_tm(offset, polling_page_address, mask_safepoint);
2577   } else {
2578     z_tmy(offset, polling_page_address, mask_profiling);
2579   }
2580 }
2581 
2582 // Check whether z_instruction is a read access to the polling page
2583 // which was emitted by load_from_polling_page(..).
2584 bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
2585   unsigned long z_instruction;
2586   unsigned int  ilen = get_instruction(instr_loc, &z_instruction);
2587 
2588   if (ilen == 2) { return false; } // It's none of the allowed instructions.
2589 
2590   if (ilen == 4) {
2591     if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.
2592 
2593     int ms = inv_mask(z_instruction,8,32);  // mask
2594     int ra = inv_reg(z_instruction,16,32);  // base register
2595     int ds = inv_uimm12(z_instruction);     // displacement
2596 
2597     if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
2598       return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
2599     }
2600 
2601   } else { /* if (ilen == 6) */
2602 
2603     assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");
2604 
2605     if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.
2606 
2607     int ms = inv_mask(z_instruction,8,48);  // mask
2608     int ra = inv_reg(z_instruction,16,48);  // base register
2609     int ds = inv_simm20(z_instruction);     // displacement
2610   }
2611 
2612   return true;
2613 }
2614 
2615 // Extract poll address from instruction and ucontext.
2616 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
2617   assert(ucontext != NULL, "must have ucontext");
2618   ucontext_t* uc = (ucontext_t*) ucontext;
2619   unsigned long z_instruction;
2620   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2621 
2622   if (ilen == 4 && is_z_tm(z_instruction)) {
2623     int ra = inv_reg(z_instruction, 16, 32);  // base register
2624     int ds = inv_uimm12(z_instruction);       // displacement
2625     address addr = (address)uc->uc_mcontext.gregs[ra];
2626     return addr + ds;
2627   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2628     int ra = inv_reg(z_instruction, 16, 48);  // base register
2629     int ds = inv_simm20(z_instruction);       // displacement
2630     address addr = (address)uc->uc_mcontext.gregs[ra];
2631     return addr + ds;
2632   }
2633 
2634   ShouldNotReachHere();
2635   return NULL;
2636 }
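// Added example (hypothetical encoding): for a poll emitted as TM 0(r3),<mask_safepoint>,
// the base register field decodes to 3 and the displacement to 0, so the faulting poll
// address is uc->uc_mcontext.gregs[3] + 0.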
2637 
2638 // Extract poll register from instruction.
2639 uint MacroAssembler::get_poll_register(address instr_loc) {
2640   unsigned long z_instruction;
2641   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2642 
2643   if (ilen == 4 && is_z_tm(z_instruction)) {
2644     return (uint)inv_reg(z_instruction, 16, 32);  // base register
2645   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2646     return (uint)inv_reg(z_instruction, 16, 48);  // base register
2647   }
2648 
2649   ShouldNotReachHere();
2650   return 0;
2651 }
2652 
2653 void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
2654   const Address poll_byte_addr(Z_thread, in_bytes(JavaThread::polling_word_offset()) + 7 /* Big Endian */);
2655   // Armed page has poll_bit set.
2656   z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
2657   z_brnaz(slow_path);
2658 }
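// In C-like terms, the poll above tests (illustrative sketch, field names simplified):
//
//   if (Z_thread->polling_word & SafepointMechanism::poll_bit()) goto slow_path;
//
// The "+ 7" in the address selects the least significant byte of the 64-bit
// polling word on this big-endian platform, which is where the poll bit resides.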
2659 
2660 // Don't rely on register locking, always use Z_R1 as scratch register instead.
2661 void MacroAssembler::bang_stack_with_offset(int offset) {
2662   // Stack grows down, caller passes positive offset.
2663   assert(offset > 0, "must bang with positive offset");
2664   if (Displacement::is_validDisp(-offset)) {
2665     z_tmy(-offset, Z_SP, mask_stackbang);
2666   } else {
2667     add2reg(Z_R1, -offset, Z_SP);    // Do not destroy Z_SP!!!
2668     z_tm(0, Z_R1, mask_stackbang);  // Just banging.
2669   }
2670 }
2671 
2672 void MacroAssembler::reserved_stack_check(Register return_pc) {
2673   // Test if reserved zone needs to be enabled.
2674   Label no_reserved_zone_enabling;
2675   assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2676   BLOCK_COMMENT("reserved_stack_check {");
2677 
2678   z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2679   z_brl(no_reserved_zone_enabling);
2680 
2681   // Enable reserved zone again, throw stack overflow exception.
2682   save_return_pc();
2683   push_frame_abi160(0);
2684   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
2685   pop_frame();
2686   restore_return_pc();
2687 
2688   load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
2689   // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
2690   z_br(Z_R1);
2691 
2692   should_not_reach_here();
2693 
2694   bind(no_reserved_zone_enabling);
2695   BLOCK_COMMENT("} reserved_stack_check");
2696 }
2697 
2698 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
2699 void MacroAssembler::tlab_allocate(Register obj,
2700                                    Register var_size_in_bytes,
2701                                    int con_size_in_bytes,
2702                                    Register t1,
2703                                    Label& slow_case) {
2704   assert_different_registers(obj, var_size_in_bytes, t1);
2705   Register end = t1;
2706   Register thread = Z_thread;
2707 
2708   z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
2709   if (var_size_in_bytes == noreg) {
2710     z_lay(end, Address(obj, con_size_in_bytes));
2711   } else {
2712     z_lay(end, Address(obj, var_size_in_bytes));
2713   }
2714   z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
2715   branch_optimized(bcondHigh, slow_case);
2716 
2717   // Update the tlab top pointer.
2718   z_stg(end, Address(thread, JavaThread::tlab_top_offset()));
2719 
2720   // Recover var_size_in_bytes if necessary.
2721   if (var_size_in_bytes == end) {
2722     z_sgr(var_size_in_bytes, obj);
2723   }
2724 }
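// Rough C-like equivalent of the bump-pointer allocation above (illustrative only,
// accessors simplified):
//
//   obj = thread->tlab_top();
//   end = obj + size;                        // size from register or constant
//   if (end > thread->tlab_end()) goto slow_case;
//   thread->set_tlab_top(end);               // obj now points to the new, uninitialized memory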
2725 
2726 // Emitter for interface method lookup.
2727 //   input: recv_klass, intf_klass, itable_index
2728 //   output: method_result
2729 //   kills: itable_index, temp1_reg, Z_R0, Z_R1
2730 // TODO: temp2_reg is unused. We may want to use this emitter in the itable stubs as well.
2731 //       If the register is still not needed by then, remove it.
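//
// Hedged sketch of the data structure being walked (see klassVtable.hpp for the
// authoritative layout):
//   recv_klass + vtable_start_offset        : vtable (vtable_length entries)
//   ...directly after the vtable            : itable offset table, one
//                                             itableOffsetEntry {interface, offset}
//                                             per interface, terminated by a NULL interface
//   recv_klass + offset (from found entry)  : itableMethodEntry array for that
//                                             interface, indexed by itable_index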
2732 void MacroAssembler::lookup_interface_method(Register           recv_klass,
2733                                              Register           intf_klass,
2734                                              RegisterOrConstant itable_index,
2735                                              Register           method_result,
2736                                              Register           temp1_reg,
2737                                              Label&             no_such_interface,
2738                                              bool               return_method) {
2739 
2740   const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
2741   const Register itable_entry_addr = Z_R1_scratch;
2742   const Register itable_interface = Z_R0_scratch;
2743 
2744   BLOCK_COMMENT("lookup_interface_method {");
2745 
2746   // Load start of itable entries into itable_entry_addr.
2747   z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
2748   z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
2749 
2750   // Loop over all itable entries until the desired interface klass (intf_klass) is found.
2751   const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
2752 
2753   add2reg_with_index(itable_entry_addr,
2754                      vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
2755                      recv_klass, vtable_len);
2756 
2757   const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
2758   Label     search;
2759 
2760   bind(search);
2761 
2762   // Handle IncompatibleClassChangeError.
2763   // If the entry is NULL then we've reached the end of the table
2764   // without finding the expected interface, so throw an exception.
2765   load_and_test_long(itable_interface, Address(itable_entry_addr));
2766   z_bre(no_such_interface);
2767 
2768   add2reg(itable_entry_addr, itable_offset_search_inc);
2769   z_cgr(itable_interface, intf_klass);
2770   z_brne(search);
2771 
2772   // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
2773   if (return_method) {
2774     const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
2775                                       itableOffsetEntry::interface_offset_in_bytes()) -
2776                                      itable_offset_search_inc;
2777 
2778     // Compute the itableMethodEntry and load the method.
2779     // We use addressing with index and displacement, since the formula
2780     // for computing the entry's offset has a fixed part (the displacement)
2781     // and a dynamic part (the index), the latter depending on the matched
2782     // interface entry and on whether the itable index was passed as a register or as a constant.
2783     int method_offset = itableMethodEntry::method_offset_in_bytes();
2784                              // Fixed part (displacement), common operand.
2785     Register itable_offset = method_result;  // Dynamic part (index register).
2786 
2787     if (itable_index.is_register()) {
2788        // Compute the method's offset in that register, for the formula, see the
2789        // else-clause below.
2790        z_sllg(itable_offset, itable_index.as_register(), exact_log2(itableMethodEntry::size() * wordSize));
2791        z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
2792     } else {
2793       // Displacement increases.
2794       method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
2795 
2796       // Load index from itable.
2797       z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
2798     }
2799 
2800     // Finally load the Method*.
2801     z_lg(method_result, method_offset, itable_offset, recv_klass);
2802   }
2803   BLOCK_COMMENT("} lookup_interface_method");
2804 }
2805 
2806 // Lookup for virtual method invocation.
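// In effect (illustrative formula, mirroring the address arithmetic below):
//   method_result = *(recv_klass + Klass::vtable_start_offset()
//                     + vtable_index * wordSize + vtableEntry::method_offset_in_bytes())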
2807 void MacroAssembler::lookup_virtual_method(Register           recv_klass,
2808                                            RegisterOrConstant vtable_index,
2809                                            Register           method_result) {
2810   assert_different_registers(recv_klass, vtable_index.register_or_noreg());
2811   assert(vtableEntry::size() * wordSize == wordSize,
2812          "else adjust the scaling in the code below");
2813 
2814   BLOCK_COMMENT("lookup_virtual_method {");
2815 
2816   const int base = in_bytes(Klass::vtable_start_offset());
2817 
2818   if (vtable_index.is_constant()) {
2819     // Load with base + disp.
2820     Address vtable_entry_addr(recv_klass,
2821                               vtable_index.as_constant() * wordSize +
2822                               base +
2823                               vtableEntry::method_offset_in_bytes());
2824 
2825     z_lg(method_result, vtable_entry_addr);
2826   } else {
2827     // Shift index properly and load with base + index + disp.
2828     Register vindex = vtable_index.as_register();
2829     Address  vtable_entry_addr(recv_klass, vindex,
2830                                base + vtableEntry::method_offset_in_bytes());
2831 
2832     z_sllg(vindex, vindex, exact_log2(wordSize));
2833     z_lg(method_result, vtable_entry_addr);
2834   }
2835   BLOCK_COMMENT("} lookup_virtual_method");
2836 }
2837 
2838 // Factored-out emitter for calling the ic_miss_handler.
2839 // Generates code to branch to the inline cache miss handler stub.
2840 //
2841 // In most cases, this code will be generated out-of-line.
2842 // The method parameters are intended to provide some variability.
2843 //   ICM          - Label which has to be bound to the start of useful code (past any traps).
2844 //   trapMarker   - Marking byte for the generated illtrap instructions (if any).
2845 //                  Any value except 0x00 is supported.
2846 //                  = 0x00 - do not generate illtrap instructions;
2847 //                           use nops to fill unused space.
2848 //   requiredSize - required size of the generated code. If the actually
2849 //                  generated code is smaller, use padding instructions to fill up.
2850 //                  = 0 - no size requirement, no padding.
2851 //   scratch      - scratch register to hold branch target address.
2852 //
2853 //  The method returns the code offset of the bound label.
2854 unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
2855   intptr_t startOffset = offset();
2856 
2857   // Prevent entry at content_begin().
2858   if (trapMarker != 0) {
2859     z_illtrap(trapMarker);
2860   }
2861 
2862   // Load address of inline cache miss code into scratch register
2863   // and branch to cache miss handler.
2864   BLOCK_COMMENT("IC miss handler {");
2865   BIND(ICM);
2866   unsigned int   labelOffset = offset();
2867   AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
2868 
2869   load_const_optimized(scratch, icmiss);
2870   z_br(scratch);
2871 
2872   // Fill unused space.
2873   if (requiredSize > 0) {
2874     while ((offset() - startOffset) < requiredSize) {
2875       if (trapMarker == 0) {
2876         z_nop();
2877       } else {
2878         z_illtrap(trapMarker);
2879       }
2880     }
2881   }
2882   BLOCK_COMMENT("} IC miss handler");
2883   return labelOffset;
2884 }
2885 
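// Unverified Entry Point (UEP): verify the inline cache before falling through
// to the verified entry. Conceptually (hedged sketch):
//   if (receiver == NULL)                    goto ic_miss;   // only if an explicit null check is required
//   if (receiver->klass() != Z_inline_cache) goto ic_miss;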
2886 void MacroAssembler::nmethod_UEP(Label& ic_miss) {
2887   Register ic_reg       = Z_inline_cache;
2888   int      klass_offset = oopDesc::klass_offset_in_bytes();
2889   if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
2890     if (VM_Version::has_CompareBranch()) {
2891       z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
2892     } else {
2893       z_ltgr(Z_ARG1, Z_ARG1);
2894       z_bre(ic_miss);
2895     }
2896   }
2897   // Compare cached class against klass from receiver.
2898   compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
2899   z_brne(ic_miss);
2900 }
2901 
2902 void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
2903                                                    Register   super_klass,
2904                                                    Register   temp1_reg,
2905                                                    Label*     L_success,
2906                                                    Label*     L_failure,
2907                                                    Label*     L_slow_path,
2908                                                    RegisterOrConstant super_check_offset) {
2909 
2910   const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
2911   const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2912 
2913   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2914   bool need_slow_path = (must_load_sco ||
2915                          super_check_offset.constant_or_zero() == sc_offset);
2916 
2917   // Input registers must not overlap.
2918   assert_different_registers(sub_klass, super_klass, temp1_reg);
2919   if (super_check_offset.is_register()) {
2920     assert_different_registers(sub_klass, super_klass,
2921                                super_check_offset.as_register());
2922   } else if (must_load_sco) {
2923     assert(temp1_reg != noreg, "supply either a temp or a register offset");
2924   }
2925 
2926   const Register Rsuper_check_offset = temp1_reg;
2927 
2928   NearLabel L_fallthrough;
2929   int label_nulls = 0;
2930   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
2931   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
2932   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
2933   assert(label_nulls <= 1 ||
2934          (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
2935          "at most one NULL in the batch, usually");
2936 
2937   BLOCK_COMMENT("check_klass_subtype_fast_path {");
2938   // If the pointers are equal, we are done (e.g., String[] elements).
2939   // This self-check enables sharing of secondary supertype arrays among
2940   // non-primary types such as array-of-interface. Otherwise, each such
2941   // type would need its own customized SSA.
2942   // We move this check to the front of the fast path because many
2943   // type checks are in fact trivially successful in this manner,
2944   // so we get a nicely predicted branch right at the start of the check.
2945   compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);
2946 
2947   // Check the supertype display, which is uint.
2948   if (must_load_sco) {
2949     z_llgf(Rsuper_check_offset, sco_offset, super_klass);
2950     super_check_offset = RegisterOrConstant(Rsuper_check_offset);
2951   }
2952   Address super_check_addr(sub_klass, super_check_offset, 0);
2953   z_cg(super_klass, super_check_addr); // compare w/ displayed supertype
2954 
2955   // This check has worked decisively for primary supers.
2956   // Secondary supers are sought in the super_cache ('super_cache_addr').
2957   // (Secondary supers are interfaces and very deeply nested subtypes.)
2958   // This works in the same check above because of a tricky aliasing
2959   // between the super_cache and the primary super display elements.
2960   // (The 'super_check_addr' can address either, as the case requires.)
2961   // Note that the cache is updated below if it does not help us find
2962   // what we need immediately.
2963   // So if it was a primary super, we can just fail immediately.
2964   // Otherwise, it's the slow path for us (no success at this point).
2965 
2966   // Hacked jmp, which may only be used just before L_fallthrough.
2967 #define final_jmp(label)                                                \
2968   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
2969   else                            { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/
2970 
2971   if (super_check_offset.is_register()) {
2972     branch_optimized(Assembler::bcondEqual, *L_success);
2973     z_cfi(super_check_offset.as_register(), sc_offset);
2974     if (L_failure == &L_fallthrough) {
2975       branch_optimized(Assembler::bcondEqual, *L_slow_path);
2976     } else {
2977       branch_optimized(Assembler::bcondNotEqual, *L_failure);
2978       final_jmp(*L_slow_path);
2979     }
2980   } else if (super_check_offset.as_constant() == sc_offset) {
2981     // Need a slow path; fast failure is impossible.
2982     if (L_slow_path == &L_fallthrough) {
2983       branch_optimized(Assembler::bcondEqual, *L_success);
2984     } else {
2985       branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
2986       final_jmp(*L_success);
2987     }
2988   } else {
2989     // No slow path; it's a fast decision.
2990     if (L_failure == &L_fallthrough) {
2991       branch_optimized(Assembler::bcondEqual, *L_success);
2992     } else {
2993       branch_optimized(Assembler::bcondNotEqual, *L_failure);
2994       final_jmp(*L_success);
2995     }
2996   }
2997 
2998   bind(L_fallthrough);
2999 #undef local_brc
3000 #undef final_jmp
3001   BLOCK_COMMENT("} check_klass_subtype_fast_path");
3002   // fallthru (to slow path)
3003 }
3004 
3005 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3006                                                    Register Rsuperklass,
3007                                                    Register Rarray_ptr,  // tmp
3008                                                    Register Rlength,     // tmp
3009                                                    Label* L_success,
3010                                                    Label* L_failure) {
3011   // Input registers must not overlap.
3012   // Also check for R1 which is explicitly used here.
3013   assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3014   NearLabel L_fallthrough;
3015   int label_nulls = 0;
3016   if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3017   if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3018   assert(label_nulls <= 1, "at most one NULL in the batch");
3019 
3020   const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3021   const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3022 
3023   const int length_offset = Array<Klass*>::length_offset_in_bytes();
3024   const int base_offset   = Array<Klass*>::base_offset_in_bytes();
3025 
3026   // Hacked jmp, which may only be used just before L_fallthrough.
3027 #define final_jmp(label)                                                \
3028   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3029   else                            branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3030 
3031   NearLabel loop_iterate, loop_count, match;
3032 
3033   BLOCK_COMMENT("check_klass_subtype_slow_path {");
3034   z_lg(Rarray_ptr, ss_offset, Rsubklass);
3035 
3036   load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3037   branch_optimized(Assembler::bcondZero, *L_failure);
3038 
3039   // Oops in the table are no longer compressed.
3040   z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3041   z_bre(match);                               // Shortcut for array length = 1.
3042 
3043   // No match yet, so we must walk the array's elements.
3044   z_lngfr(Rlength, Rlength);
3045   z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3046   z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
3047   add2reg(Rlength, 2 * BytesPerWord);        // start index  = -(n-2)*BytesPerWord
3048   z_slgr(Rarray_ptr, Rlength);               // start addr: +=  (n-2)*BytesPerWord
3049   z_bru(loop_count);
3050 
3051   BIND(loop_iterate);
3052   z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3053   z_bre(match);
3054   BIND(loop_count);
3055   z_brxlg(Rlength, Z_R1, loop_iterate);
3056 
3057   // Rsuperklass not found among secondary super classes -> failure.
3058   branch_optimized(Assembler::bcondAlways, *L_failure);
3059 
3060   // Got a hit. Return success (zero result). Set cache.
3061   // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3062 
3063   BIND(match);
3064 
3065   z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3066 
3067   final_jmp(*L_success);
3068 
3069   // Exit to the surrounding code.
3070   BIND(L_fallthrough);
3071 #undef local_brc
3072 #undef final_jmp
3073   BLOCK_COMMENT("} check_klass_subtype_slow_path");
3074 }
3075 
3076 // Emitter for combining fast and slow path.
3077 void MacroAssembler::check_klass_subtype(Register sub_klass,
3078                                          Register super_klass,
3079                                          Register temp1_reg,
3080                                          Register temp2_reg,
3081                                          Label&   L_success) {
3082   NearLabel failure;
3083   BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3084   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3085                                 &L_success, &failure, NULL);
3086   check_klass_subtype_slow_path(sub_klass, super_klass,
3087                                 temp1_reg, temp2_reg, &L_success, NULL);
3088   BIND(failure);
3089   BLOCK_COMMENT("} check_klass_subtype");
3090 }
3091 
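// Class initialization barrier. C-like sketch of the checks below (illustrative):
//   if (klass->init_state  == fully_initialized) goto fast_path;
//   if (klass->init_thread == current_thread)    goto fast_path;   // the initializer may proceed
//   goto slow_path;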
3092 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
3093   assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
3094 
3095   Label L_fallthrough;
3096   if (L_fast_path == NULL) {
3097     L_fast_path = &L_fallthrough;
3098   } else if (L_slow_path == NULL) {
3099     L_slow_path = &L_fallthrough;
3100   }
3101 
3102   // Fast path check: class is fully initialized
3103   z_cli(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3104   z_bre(*L_fast_path);
3105 
3106   // Fast path check: current thread is initializer thread
3107   z_cg(thread, Address(klass, InstanceKlass::init_thread_offset()));
3108   if (L_slow_path == &L_fallthrough) {
3109     z_bre(*L_fast_path);
3110   } else if (L_fast_path == &L_fallthrough) {
3111     z_brne(*L_slow_path);
3112   } else {
3113     Unimplemented();
3114   }
3115 
3116   bind(L_fallthrough);
3117 }
3118 
3119 // Increment a counter at counter_address when the eq condition code is
3120 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3121 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3122   Label l;
3123   z_brne(l);
3124   load_const(tmp1_reg, counter_address);
3125   add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3126   z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3127   bind(l);
3128 }
3129 
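// Hedged C-like sketch of the fast-locking protocol emitted below (not the
// exact emitted sequence; success vs. failure is reported via the condition code):
//
//   markWord mark = obj->mark();
//   if (mark & monitor_value) {                                  // already inflated
//     locked = CAS(&monitor->owner, NULL, Z_thread);
//   } else {
//     box->displaced_header = mark | unlocked_value;
//     if (CAS(&obj->mark, mark | unlocked_value, box)) {
//       locked = true;                                           // stack-lock acquired
//     } else {
//       // Recursive case: the mark now in place must point into our own stack page.
//       box->displaced_header = (obj->mark() - Z_SP) & (~(os::vm_page_size() - 1) | lock_mask_in_place);
//       locked = (box->displaced_header == 0);
//     }
//   }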
3130 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2) {
3131   Register displacedHeader = temp1;
3132   Register currentHeader = temp1;
3133   Register temp = temp2;
3134   NearLabel done, object_has_monitor;
3135 
3136   BLOCK_COMMENT("compiler_fast_lock_object {");
3137 
3138   // Load markWord from oop into mark.
3139   z_lg(displacedHeader, 0, oop);
3140 
3141   if (DiagnoseSyncOnValueBasedClasses != 0) {
3142     load_klass(Z_R1_scratch, oop);
3143     z_l(Z_R1_scratch, Address(Z_R1_scratch, Klass::access_flags_offset()));
3144     assert((JVM_ACC_IS_VALUE_BASED_CLASS & 0xFFFF) == 0, "or change following instruction");
3145     z_nilh(Z_R1_scratch, JVM_ACC_IS_VALUE_BASED_CLASS >> 16);
3146     z_brne(done);
3147   }
3148 
3149   // Handle existing monitor.
3150   // The object has an existing monitor iff (mark & monitor_value) != 0.
3151   guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
3152   z_lr(temp, displacedHeader);
3153   z_nill(temp, markWord::monitor_value);
3154   z_brne(object_has_monitor);
3155 
3156   // Set mark to markWord | markWord::unlocked_value.
3157   z_oill(displacedHeader, markWord::unlocked_value);
3158 
3159   // Load Compare Value application register.
3160 
3161   // Initialize the box (must happen before we update the object mark).
3162   z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
3163 
3164   // Memory Fence (in cmpxchgd)
3165   // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
3166 
3167   // If the compare-and-swap succeeded, then we found an unlocked object and we
3168   // have now locked it.
3169   z_csg(displacedHeader, box, 0, oop);
3170   assert(currentHeader==displacedHeader, "must be same register"); // currentHeader and displacedHeader alias the same register (temp1).
3171   z_bre(done);
3172 
3173   // We did not see an unlocked object so try the fast recursive case.
3174 
3175   z_sgr(currentHeader, Z_SP);
3176   load_const_optimized(temp, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
3177 
3178   z_ngr(currentHeader, temp);
3179   //   z_brne(done);
3180   //   z_release();
3181   z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
3182 
3183   z_bru(done);
3184 
3185   Register zero = temp;
3186   Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
3187   bind(object_has_monitor);
3188   // The object's monitor m is unlocked iff m->owner == NULL,
3189   // otherwise m->owner may contain a thread or a stack address.
3190   //
3191   // Try to CAS m->owner from NULL to current thread.
3192   z_lghi(zero, 0);
3193   // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3194   z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3195   // Store a non-null value into the box.
3196   z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3197 #ifdef ASSERT
3198   z_brne(done);
3199   // We've acquired the monitor, check some invariants.
3200   // Invariant 1: _recursions should be 0.
3201   asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
3202                           "monitor->_recursions should be 0", -1);
3203   z_ltgr(zero, zero); // Set CR=EQ.
3204 #endif
3205   bind(done);
3206 
3207   BLOCK_COMMENT("} compiler_fast_lock_object");
3208   // If locking was successful, CR should indicate 'EQ'.
3209   // The compiler or the native wrapper generates a branch to the runtime call
3210   // _complete_monitor_locking_Java.
3211 }
3212 
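// Hedged C-like sketch of the fast-unlocking protocol emitted below (illustrative only):
//
//   if (box->displaced_header == 0) return EQ;                   // recursive stack-lock
//   markWord mark = obj->mark();
//   if ((mark & monitor_value) == 0) {                           // stack-locked
//     return CAS(&obj->mark, box, box->displaced_header) ? EQ : NE;
//   }
//   // Inflated: succeed only if all monitor fields tested below
//   // (recursions, owner, EntryList, cxq) are zero; then release-store NULL into owner.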
3213 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2) {
3214   Register displacedHeader = temp1;
3215   Register currentHeader = temp2;
3216   Register temp = temp1;
3217   Register monitor = temp2;
3218 
3219   Label done, object_has_monitor;
3220 
3221   BLOCK_COMMENT("compiler_fast_unlock_object {");
3222 
3223   // Find the lock address and load the displaced header from the stack.
3224   // if the displaced header is zero, we have a recursive unlock.
3225   load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3226   z_bre(done);
3227 
3228   // Handle existing monitor.
3229   // The object has an existing monitor iff (mark & monitor_value) != 0.
3230   z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
3231   guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
3232   z_nill(currentHeader, markWord::monitor_value);
3233   z_brne(object_has_monitor);
3234 
3235   // Check if it is still a lightweight lock. This is the case if we see
3236   // the stack address of the basicLock in the markWord of the object.
3237   // Copy box to currentHeader such that csg does not kill it.
3238   z_lgr(currentHeader, box);
3239   z_csg(currentHeader, displacedHeader, 0, oop);
3240   z_bru(done); // Csg sets CR as desired.
3241 
3242   // Handle existing monitor.
3243   bind(object_has_monitor);
3244   z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);    // CurrentHeader is tagged with monitor_value set.
3245   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3246   z_brne(done);
3247   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3248   z_brne(done);
3249   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3250   z_brne(done);
3251   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3252   z_brne(done);
3253   z_release();
3254   z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3255 
3256   bind(done);
3257 
3258   BLOCK_COMMENT("} compiler_fast_unlock_object");
3259   // flag == EQ indicates success
3260   // flag == NE indicates failure
3261 }
3262 
3263 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3264   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3265   bs->resolve_jobject(this, value, tmp1, tmp2);
3266 }
3267 
3268 // Last_Java_sp must comply to the rules in frame_s390.hpp.
3269 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3270   BLOCK_COMMENT("set_last_Java_frame {");
3271 
3272   // Always set last_Java_pc and flags first because once last_Java_sp
3273   // is visible, has_last_Java_frame is true and users will look at the
3274   // rest of the fields. (Note: flags should always be zero before we
3275   // get here, so they don't need to be set.)
3276 
3277   // Verify that last_Java_pc was zeroed on return to Java.
3278   if (allow_relocation) {
3279     asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3280                             Z_thread,
3281                             "last_Java_pc not zeroed before leaving Java",
3282                             0x200);
3283   } else {
3284     asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3285                                    Z_thread,
3286                                    "last_Java_pc not zeroed before leaving Java",
3287                                    0x200);
3288   }
3289 
3290   // When returning from a call out of Java mode, the frame anchor's
3291   // last_Java_pc will always be set to NULL. It is set here so that,
3292   // if we are doing a call to native (not VM) code, we capture the
3293   // known pc and don't have to rely on the native call having a
3294   // standard frame linkage where we can find the pc.
3295   if (last_Java_pc!=noreg) {
3296     z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3297   }
3298 
3299   // This membar release is not required on z/Architecture, since the sequence of stores
3300   // is maintained. Nevertheless, we leave it in to document the required ordering.
3301   // The implementation of z_release() should be empty.
3302   // z_release();
3303 
3304   z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3305   BLOCK_COMMENT("} set_last_Java_frame");
3306 }
3307 
3308 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3309   BLOCK_COMMENT("reset_last_Java_frame {");
3310 
3311   if (allow_relocation) {
3312     asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3313                                Z_thread,
3314                                "SP was not set, still zero",
3315                                0x202);
3316   } else {
3317     asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3318                                       Z_thread,
3319                                       "SP was not set, still zero",
3320                                       0x202);
3321   }
3322 
3323   // _last_Java_sp = 0
3324   // Clearing storage must be atomic here, so don't use clear_mem()!
3325   store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3326 
3327   // _last_Java_pc = 0
3328   store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3329 
3330   BLOCK_COMMENT("} reset_last_Java_frame");
3331   return;
3332 }
3333 
3334 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3335   assert_different_registers(sp, tmp1);
3336 
3337   // We cannot trust that code generated by the C++ compiler saves R14
3338   // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3339   // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3340   // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3341   // it into the frame anchor.
3342   get_PC(tmp1);
3343   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3344 }
3345 
3346 void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3347   z_release();
3348 
3349   assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3350   assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3351   store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3352 }
3353 
3354 void MacroAssembler::get_vm_result(Register oop_result) {
3355   verify_thread();
3356 
3357   z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3358   clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3359 
3360   verify_oop(oop_result, FILE_AND_LINE);
3361 }
3362 
3363 void MacroAssembler::get_vm_result_2(Register result) {
3364   verify_thread();
3365 
3366   z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3367   clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3368 }
3369 
3370 // We require that C code which does not return a value in vm_result will
3371 // leave it undisturbed.
3372 void MacroAssembler::set_vm_result(Register oop_result) {
3373   z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3374 }
3375 
3376 // Explicit null checks (used for method handle code).
3377 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3378   if (!ImplicitNullChecks) {
3379     NearLabel ok;
3380 
3381     compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3382 
3383     // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3384     address exception_entry = Interpreter::throw_NullPointerException_entry();
3385     load_absolute_address(reg, exception_entry);
3386     z_br(reg);
3387 
3388     bind(ok);
3389   } else {
3390     if (needs_explicit_null_check((intptr_t)offset)) {
3391       // Provoke OS NULL exception if reg = NULL by
3392       // accessing M[reg] w/o changing any registers.
3393       z_lg(tmp, 0, reg);
3394     }
3395     // else
3396       // Nothing to do, (later) access of M[reg + offset]
3397       // will provoke OS NULL exception if reg = NULL.
3398   }
3399 }
3400 
3401 //-------------------------------------
3402 //  Compressed Klass Pointers
3403 //-------------------------------------
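// Throughout this section the mapping is, in effect (illustrative summary):
//   narrow_klass = (Klass*  - CompressedKlassPointers::base()) >> CompressedKlassPointers::shift()
//   Klass*       = (narrow_klass << shift) + base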
3404 
3405 // Klass oop manipulations if compressed.
3406 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3407   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3408   address  base    = CompressedKlassPointers::base();
3409   int      shift   = CompressedKlassPointers::shift();
3410   bool     need_zero_extend = base != 0;
3411   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3412 
3413   BLOCK_COMMENT("cKlass encoder {");
3414 
3415 #ifdef ASSERT
3416   Label ok;
3417   z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3418   z_brc(Assembler::bcondAllZero, ok);
3419   // The plain disassembler does not recognize illtrap. It instead displays
3420   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3421   // the proper beginning of the next instruction.
3422   z_illtrap(0xee);
3423   z_illtrap(0xee);
3424   bind(ok);
3425 #endif
3426 
3427   // Scale down the incoming klass pointer first.
3428   // We then can be sure we calculate an offset that fits into 32 bit.
3429   // More generally speaking: all subsequent calculations are purely 32-bit.
3430   if (shift != 0) {
3431     assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
3432     z_srlg(dst, current, shift);
3433     current = dst;
3434   }
3435 
3436   if (base != NULL) {
3437     // Use scaled-down base address parts to match scaled-down klass pointer.
3438     unsigned int base_h = ((unsigned long)base)>>(32+shift);
3439     unsigned int base_l = (unsigned int)(((unsigned long)base)>>shift);
3440 
3441     // General considerations:
3442     //  - when calculating (current_h - base_h), all digits must cancel (become 0).
3443     //    Otherwise, we would end up with a compressed klass pointer which doesn't
3444     //    fit into 32-bit.
3445     //  - Only bit#33 of the difference could potentially be non-zero. For that
3446     //    to happen, (current_l < base_l) must hold. In this case, the subtraction
3447     //    will create a borrow out of bit#32, nicely killing bit#33.
3448     //  - With the above, we only need to consider current_l and base_l to
3449     //    calculate the result.
3450     //  - Both values are treated as unsigned. The unsigned subtraction is
3451     //    replaced by adding (unsigned) the 2's complement of the subtrahend.
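    //
    //  Worked example (hedged, illustrative numbers only), with shift = 3:
    //    base    = 0x0000000F_80000000 -> scaled 0x00000001_F0000000 (base_h = 1, base_l = 0xF0000000)
    //    current = 0x00000010_80001000 -> scaled 0x00000002_10000200 (current_l = 0x10000200 < base_l)
    //    32-bit calculation: 0x10000200 + 0x10000000 (2's complement of base_l) = 0x20000200,
    //    which equals the 64-bit difference of the scaled values; the borrow cancels (current_h - base_h).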
3452 
3453     if (base_l == 0) {
3454       //  - By theory, the calculation to be performed here (current_h - base_h) MUST
3455       //    cancel all high-word bits. Otherwise, we would end up with an offset
3456       //    (i.e. compressed klass pointer) that does not fit into 32 bit.
3457       //  - current_l remains unchanged.
3458       //  - Therefore, we can replace all calculation with just a
3459       //    zero-extending load 32 to 64 bit.
3460       //  - Even that can be replaced with a conditional load if dst != current.
3461       //    (this is a local view. The shift step may have requested zero-extension).
3462     } else {
3463       if ((base_h == 0) && is_uimm(base_l, 31)) {
3464         // If we happen to find that (base_h == 0), and that base_l is within the range
3465         // which can be represented by a signed int, then we can use 64bit signed add with
3466         // (-base_l) as 32bit signed immediate operand. The add will take care of the
3467         // upper 32 bits of the result, saving us the need of an extra zero extension.
3468         // For base_l to be in the required range, it must not have the most significant
3469         // bit (aka sign bit) set.
3470         lgr_if_needed(dst, current); // no zero/sign extension in this case!
3471         z_agfi(dst, -(int)base_l);   // base_l must be passed as signed.
3472         need_zero_extend = false;
3473         current = dst;
3474       } else {
3475         // To begin with, we may need to copy and/or zero-extend the register operand.
3476         // We have to calculate (current_l - base_l). Because there is no unsigned
3477         // subtract instruction with immediate operand, we add the 2's complement of base_l.
3478         if (need_zero_extend) {
3479           z_llgfr(dst, current);
3480           need_zero_extend = false;
3481         } else {
3482           llgfr_if_needed(dst, current);
3483         }
3484         current = dst;
3485         z_alfi(dst, -base_l);
3486       }
3487     }
3488   }
3489 
3490   if (need_zero_extend) {
3491     // We must zero-extend the calculated result. It may have some leftover bits in
3492     // the hi-word because we only did optimized calculations.
3493     z_llgfr(dst, current);
3494   } else {
3495     llgfr_if_needed(dst, current); // zero-extension while copying comes at no extra cost.
3496   }
3497 
3498   BLOCK_COMMENT("} cKlass encoder");
3499 }
3500 
3501 // This function calculates the size of the code generated by
3502 //   decode_klass_not_null(register dst, Register src)
3503 // when (Universe::heap() != NULL). Hence, if the instructions
3504 // it generates change, then this method needs to be updated.
3505 int MacroAssembler::instr_size_for_decode_klass_not_null() {
3506   address  base    = CompressedKlassPointers::base();
3507   int shift_size   = CompressedKlassPointers::shift() == 0 ? 0 : 6; /* sllg */
3508   int addbase_size = 0;
3509   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3510 
3511   if (base != NULL) {
3512     unsigned int base_h = ((unsigned long)base)>>32;
3513     unsigned int base_l = (unsigned int)((unsigned long)base);
3514     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3515       addbase_size += 6; /* aih */
3516     } else if ((base_h == 0) && (base_l != 0)) {
3517       addbase_size += 6; /* algfi */
3518     } else {
3519       addbase_size += load_const_size();
3520       addbase_size += 4; /* algr */
3521     }
3522   }
3523 #ifdef ASSERT
3524   addbase_size += 10;
3525   addbase_size += 2; // Extra sigill.
3526 #endif
3527   return addbase_size + shift_size;
3528 }
3529 
3530 // !!! If the instructions that get generated here change
3531 //     then function instr_size_for_decode_klass_not_null()
3532 //     needs to get updated.
3533 // This variant of decode_klass_not_null() must generate predictable code!
3534 // The code must only depend on globally known parameters.
3535 void MacroAssembler::decode_klass_not_null(Register dst) {
3536   address  base    = CompressedKlassPointers::base();
3537   int      shift   = CompressedKlassPointers::shift();
3538   int      beg_off = offset();
3539   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3540 
3541   BLOCK_COMMENT("cKlass decoder (const size) {");
3542 
3543   if (shift != 0) { // Shift required?
3544     z_sllg(dst, dst, shift);
3545   }
3546   if (base != NULL) {
3547     unsigned int base_h = ((unsigned long)base)>>32;
3548     unsigned int base_l = (unsigned int)((unsigned long)base);
3549     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3550       z_aih(dst, base_h);     // Base has no set bits in lower half.
3551     } else if ((base_h == 0) && (base_l != 0)) {
3552       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3553     } else {
3554       load_const(Z_R0, base); // Base has set bits everywhere.
3555       z_algr(dst, Z_R0);
3556     }
3557   }
3558 
3559 #ifdef ASSERT
3560   Label ok;
3561   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3562   z_brc(Assembler::bcondAllZero, ok);
3563   // The plain disassembler does not recognize illtrap. It instead displays
3564   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3565   // the proper beginning of the next instruction.
3566   z_illtrap(0xd1);
3567   z_illtrap(0xd1);
3568   bind(ok);
3569 #endif
3570   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
3571 
3572   BLOCK_COMMENT("} cKlass decoder (const size)");
3573 }
3574 
3575 // This variant of decode_klass_not_null() is for cases where
3576 //  1) the size of the generated instructions may vary
3577 //  2) the result is (potentially) stored in a register different from the source.
3578 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3579   address base  = CompressedKlassPointers::base();
3580   int     shift = CompressedKlassPointers::shift();
3581   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3582 
3583   BLOCK_COMMENT("cKlass decoder {");
3584 
3585   if (src == noreg) src = dst;
3586 
3587   if (shift != 0) { // Shift or at least move required?
3588     z_sllg(dst, src, shift);
3589   } else {
3590     lgr_if_needed(dst, src);
3591   }
3592 
3593   if (base != NULL) {
3594     unsigned int base_h = ((unsigned long)base)>>32;
3595     unsigned int base_l = (unsigned int)((unsigned long)base);
3596     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3597       z_aih(dst, base_h);     // Base has no set bits in lower half.
3598     } else if ((base_h == 0) && (base_l != 0)) {
3599       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3600     } else {
3601       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
3602       z_algr(dst, Z_R0);
3603     }
3604   }
3605 
3606 #ifdef ASSERT
3607   Label ok;
3608   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3609   z_brc(Assembler::bcondAllZero, ok);
3610   // The plain disassembler does not recognize illtrap. It instead displays
3611   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3612   // the proper beginning of the next instruction.
3613   z_illtrap(0xd2);
3614   z_illtrap(0xd2);
3615   bind(ok);
3616 #endif
3617   BLOCK_COMMENT("} cKlass decoder");
3618 }
3619 
3620 void MacroAssembler::load_klass(Register klass, Address mem) {
3621   if (UseCompressedClassPointers) {
3622     z_llgf(klass, mem);
3623     // Attention: no null check here!
3624     decode_klass_not_null(klass);
3625   } else {
3626     z_lg(klass, mem);
3627   }
3628 }
3629 
3630 void MacroAssembler::load_klass(Register klass, Register src_oop) {
3631   if (UseCompressedClassPointers) {
3632     z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3633     // Attention: no null check here!
3634     decode_klass_not_null(klass);
3635   } else {
3636     z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3637   }
3638 }
3639 
3640 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
3641   if (UseCompressedClassPointers) {
3642     assert_different_registers(dst_oop, klass, Z_R0);
3643     if (ck == noreg) ck = klass;
3644     encode_klass_not_null(ck, klass);
3645     z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
3646   } else {
3647     z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
3648   }
3649 }
3650 
3651 void MacroAssembler::store_klass_gap(Register s, Register d) {
3652   if (UseCompressedClassPointers) {
3653     assert(s != d, "not enough registers");
3654     // Support s = noreg.
3655     if (s != noreg) {
3656       z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
3657     } else {
3658       z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
3659     }
3660   }
3661 }
3662 
3663 // Compare klass ptr in memory against klass ptr in register.
3664 //
3665 // Rop1            - klass in register, always uncompressed.
3666 // disp            - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
3667 // Rbase           - Base address of cKlass in memory.
3668 // maybeNULL       - True if Rop1 possibly is a NULL.
3669 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {
3670 
3671   BLOCK_COMMENT("compare klass ptr {");
3672 
3673   if (UseCompressedClassPointers) {
3674     const int shift = CompressedKlassPointers::shift();
3675     address   base  = CompressedKlassPointers::base();
3676 
3677     assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
3678     assert_different_registers(Rop1, Z_R0);
3679     assert_different_registers(Rop1, Rbase, Z_R1);
3680 
3681     // First encode register oop and then compare with cOop in memory.
3682     // This sequence saves an unnecessary cOop load and decode.
3683     if (base == NULL) {
3684       if (shift == 0) {
3685         z_cl(Rop1, disp, Rbase);     // Unscaled
3686       } else {
3687         z_srlg(Z_R0, Rop1, shift);   // ZeroBased
3688         z_cl(Z_R0, disp, Rbase);
3689       }
3690     } else {                         // HeapBased
3691 #ifdef ASSERT
3692       bool     used_R0 = true;
3693       bool     used_R1 = true;
3694 #endif
3695       Register current = Rop1;
3696       Label    done;
3697 
3698       if (maybeNULL) {       // NULL ptr must be preserved!
3699         z_ltgr(Z_R0, current);
3700         z_bre(done);
3701         current = Z_R0;
3702       }
3703 
3704       unsigned int base_h = ((unsigned long)base)>>32;
3705       unsigned int base_l = (unsigned int)((unsigned long)base);
3706       if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3707         lgr_if_needed(Z_R0, current);
3708         z_aih(Z_R0, -((int)base_h));     // Base has no set bits in lower half.
3709       } else if ((base_h == 0) && (base_l != 0)) {
3710         lgr_if_needed(Z_R0, current);
3711         z_agfi(Z_R0, -(int)base_l);
3712       } else {
3713         int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
3714         add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
3715       }
3716 
3717       if (shift != 0) {
3718         z_srlg(Z_R0, Z_R0, shift);
3719       }
3720       bind(done);
3721       z_cl(Z_R0, disp, Rbase);
3722 #ifdef ASSERT
3723       if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
3724       if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
3725 #endif
3726     }
3727   } else {
3728     z_clg(Rop1, disp, Z_R0, Rbase);
3729   }
3730   BLOCK_COMMENT("} compare klass ptr");
3731 }
3732 
3733 //---------------------------
3734 //  Compressed oops
3735 //---------------------------
3736 
3737 void MacroAssembler::encode_heap_oop(Register oop) {
3738   oop_encoder(oop, oop, true /*maybe null*/);
3739 }
3740 
3741 void MacroAssembler::encode_heap_oop_not_null(Register oop) {
3742   oop_encoder(oop, oop, false /*not null*/);
3743 }
3744 
3745 // Called with something derived from the oop base, e.g. oop_base>>3.
3746 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {
3747   unsigned int oop_base_ll = ((unsigned int)(oop_base >>  0)) & 0xffff;
3748   unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;
3749   unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;
3750   unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;
3751   unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)
3752                                + (oop_base_lh == 0 ? 0:1)
3753                                + (oop_base_hl == 0 ? 0:1)
3754                                + (oop_base_hh == 0 ? 0:1);
3755 
3756   assert(oop_base != 0, "This is for HeapBased cOops only");
3757 
3758   if (n_notzero_parts != 1) { //  Check if oop_base is just a few pages shy of a power of 2.
3759     uint64_t pow2_offset = 0x10000 - oop_base_ll;
3760     if (pow2_offset < 0x8000) {  // This might not be necessary.
3761       uint64_t oop_base2 = oop_base + pow2_offset;
3762 
3763       oop_base_ll = ((unsigned int)(oop_base2 >>  0)) & 0xffff;
3764       oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;
3765       oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;
3766       oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;
3767       n_notzero_parts = (oop_base_ll == 0 ? 0:1) +
3768                         (oop_base_lh == 0 ? 0:1) +
3769                         (oop_base_hl == 0 ? 0:1) +
3770                         (oop_base_hh == 0 ? 0:1);
3771       if (n_notzero_parts == 1) {
3772         assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");
3773         return -pow2_offset;
3774       }
3775     }
3776   }
3777   return 0;
3778 }
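// Worked example (hedged, illustrative numbers only):
//   oop_base = 0x7FFFF000 -> parts hh=0, hl=0, lh=0x7FFF, ll=0xF000 -> 2 non-zero parts.
//   pow2_offset = 0x10000 - 0xF000 = 0x1000 (< 0x8000); oop_base + 0x1000 = 0x80000000
//   has a single non-zero part, so the function returns -0x1000. get_oop_base() then
//   materializes the "nicer" constant 0x80000000 and reports the -0x1000 offset,
//   which callers compensate for in a later composite add.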
3779 
3780 // If base address is offset from a straight power of two by just a few pages,
3781 // return this offset to the caller for a possible later composite add.
3782 // TODO/FIX: will only work correctly for 4k pages.
3783 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {
3784   int pow2_offset = get_oop_base_pow2_offset(oop_base);
3785 
3786   load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.
3787 
3788   return pow2_offset;
3789 }
3790 
3791 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
3792   int offset = get_oop_base(Rbase, oop_base);
3793   z_lcgr(Rbase, Rbase);
3794   return -offset;
3795 }
3796 
3797 // Compare compressed oop in memory against oop in register.
3798 // Rop1            - Oop in register.
3799 // disp            - Offset of cOop in memory.
3800 // Rbase           - Base address of cOop in memory.
3801 // maybeNULL       - True if Rop1 possibly is a NULL.
3803 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
3804   Register Rbase  = mem.baseOrR0();
3805   Register Rindex = mem.indexOrR0();
3806   int64_t  disp   = mem.disp();
3807 
3808   const int shift = CompressedOops::shift();
3809   address   base  = CompressedOops::base();
3810 
3811   assert(UseCompressedOops, "must be on to call this method");
3812   assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
3813   assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
3814   assert_different_registers(Rop1, Z_R0);
3815   assert_different_registers(Rop1, Rbase, Z_R1);
3816   assert_different_registers(Rop1, Rindex, Z_R1);
3817 
3818   BLOCK_COMMENT("compare heap oop {");
3819 
3820   // First encode register oop and then compare with cOop in memory.
3821   // This sequence saves an unnecessary cOop load and decode.
3822   if (base == NULL) {
3823     if (shift == 0) {
3824       z_cl(Rop1, disp, Rindex, Rbase);  // Unscaled
3825     } else {
3826       z_srlg(Z_R0, Rop1, shift);        // ZeroBased
3827       z_cl(Z_R0, disp, Rindex, Rbase);
3828     }
3829   } else {                              // HeapBased
3830 #ifdef ASSERT
3831     bool  used_R0 = true;
3832     bool  used_R1 = true;
3833 #endif
3834     Label done;
3835     int   pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
3836 
3837     if (maybeNULL) {       // NULL ptr must be preserved!
3838       z_ltgr(Z_R0, Rop1);
3839       z_bre(done);
3840     }
3841 
3842     add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
3843     z_srlg(Z_R0, Z_R0, shift);
3844 
3845     bind(done);
3846     z_cl(Z_R0, disp, Rindex, Rbase);
3847 #ifdef ASSERT
3848     if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
3849     if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
3850 #endif
3851   }
3852   BLOCK_COMMENT("} compare heap oop");
3853 }
3854 
3855 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
3856                                      const Address& addr, Register val,
3857                                      Register tmp1, Register tmp2, Register tmp3) {
3858   assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
3859                          ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
3860   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3861   decorators = AccessInternal::decorator_fixup(decorators);
3862   bool as_raw = (decorators & AS_RAW) != 0;
3863   if (as_raw) {
3864     bs->BarrierSetAssembler::store_at(this, decorators, type,
3865                                       addr, val,
3866                                       tmp1, tmp2, tmp3);
3867   } else {
3868     bs->store_at(this, decorators, type,
3869                  addr, val,
3870                  tmp1, tmp2, tmp3);
3871   }
3872 }
3873 
3874 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
3875                                     const Address& addr, Register dst,
3876                                     Register tmp1, Register tmp2, Label *is_null) {
3877   assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
3878                          ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");
3879   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3880   decorators = AccessInternal::decorator_fixup(decorators);
3881   bool as_raw = (decorators & AS_RAW) != 0;
3882   if (as_raw) {
3883     bs->BarrierSetAssembler::load_at(this, decorators, type,
3884                                      addr, dst,
3885                                      tmp1, tmp2, is_null);
3886   } else {
3887     bs->load_at(this, decorators, type,
3888                 addr, dst,
3889                 tmp1, tmp2, is_null);
3890   }
3891 }
3892 
3893 void MacroAssembler::load_heap_oop(Register dest, const Address &a,
3894                                    Register tmp1, Register tmp2,
3895                                    DecoratorSet decorators, Label *is_null) {
3896   access_load_at(T_OBJECT, IN_HEAP | decorators, a, dest, tmp1, tmp2, is_null);
3897 }
3898 
3899 void MacroAssembler::store_heap_oop(Register Roop, const Address &a,
3900                                     Register tmp1, Register tmp2, Register tmp3,
3901                                     DecoratorSet decorators) {
3902   access_store_at(T_OBJECT, IN_HEAP | decorators, a, Roop, tmp1, tmp2, tmp3);
3903 }
3904 
3905 //-------------------------------------------------
3906 // Encode compressed oop. Generally usable encoder.
3907 //-------------------------------------------------
3908 // Rsrc - contains regular oop on entry. It remains unchanged.
3909 // Rdst - contains compressed oop on exit.
3910 // Rdst and Rsrc may indicate the same register, in which case Rsrc is, of course, modified.
3911 //
3912 // For correctness, Rdst must not indicate scratch register Z_R1 (Z_R1_scratch).
3913 // For performance, Rdst should not indicate scratch register Z_R0 (Z_R0_scratch).
3914 //
3915 // only32bitValid is set if later code uses only the lower 32 bits. In this
3916 // case we do not clean up the upper 32 bits.
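     //
     // Conceptually (ignoring register constraints and instruction scheduling),
     // the encoding performed below is:
     //   Rdst = (Rsrc == NULL) ? 0 : (Rsrc - oop_base) >> oop_shift;
     // The subtraction is implemented by adding a precomputed complement of
     // oop_base (see get_oop_base_complement() and the pow2_offset remainder).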
3917 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
3918                                  Register Rbase, int pow2_offset, bool only32bitValid) {
3919 
3920   const address oop_base  = CompressedOops::base();
3921   const int     oop_shift = CompressedOops::shift();
3922   const bool    disjoint  = CompressedOops::base_disjoint();
3923 
3924   assert(UseCompressedOops, "must be on to call this method");
3925   assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
3926   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
3927 
3928   if (disjoint || (oop_base == NULL)) {
3929     BLOCK_COMMENT("cOop encoder zeroBase {");
3930     if (oop_shift == 0) {
3931       if (oop_base != NULL && !only32bitValid) {
3932         z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
3933       } else {
3934         lgr_if_needed(Rdst, Rsrc);
3935       }
3936     } else {
3937       z_srlg(Rdst, Rsrc, oop_shift);
3938       if (oop_base != NULL && !only32bitValid) {
3939         z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
3940       }
3941     }
3942     BLOCK_COMMENT("} cOop encoder zeroBase");
3943     return;
3944   }
3945 
3946   bool used_R0 = false;
3947   bool used_R1 = false;
3948 
3949   BLOCK_COMMENT("cOop encoder general {");
3950   assert_different_registers(Rdst, Z_R1);
3951   assert_different_registers(Rsrc, Rbase);
3952   if (maybeNULL) {
3953     Label done;
3954     // We reorder shifting and subtracting, so that we can compare
3955     // and shift in parallel:
3956     //
3957     // cycle 0:  potential LoadN, base = <const>
3958     // cycle 1:  base = !base     dst = src >> 3,    cmp cr = (src != 0)
3959     // cycle 2:  if (cr) br,      dst = dst + base + offset
3960 
3961     // Get oop_base components.
3962     if (pow2_offset == -1) {
3963       if (Rdst == Rbase) {
3964         if (Rdst == Z_R1 || Rsrc == Z_R1) {
3965           Rbase = Z_R0;
3966           used_R0 = true;
3967         } else {
3968           Rdst = Z_R1;
3969           used_R1 = true;
3970         }
3971       }
3972       if (Rbase == Z_R1) {
3973         used_R1 = true;
3974       }
3975       pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
3976     }
3977     assert_different_registers(Rdst, Rbase);
3978 
3979     // Check for NULL oop (must be left alone) and shift.
3980     if (oop_shift != 0) {  // Shift out alignment bits
3981       if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
3982         z_srag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
3983       } else {
3984         z_srlg(Rdst, Rsrc, oop_shift);
3985         z_ltgr(Rsrc, Rsrc);  // This is the recommended way of testing for zero.
3986         // The alternative below would avoid writing a register, but it is not faster:
3987         // z_cghi(Rsrc, 0);
3988       }
3989     } else {
3990       z_ltgr(Rdst, Rsrc);   // Move NULL to result register.
3991     }
3992     z_bre(done);
3993 
3994     // Subtract oop_base components.
3995     if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
3996       z_algr(Rdst, Rbase);
3997       if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
3998     } else {
3999       add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
4000     }
4001     if (!only32bitValid) {
4002       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4003     }
4004     bind(done);
4005 
4006   } else {  // not null
4007     // Get oop_base components.
4008     if (pow2_offset == -1) {
4009       pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
4010     }
4011 
4012     // Subtract oop_base components and shift.
4013     if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
4014       // Don't use lay instruction.
4015       if (Rdst == Rsrc) {
4016         z_algr(Rdst, Rbase);
4017       } else {
4018         lgr_if_needed(Rdst, Rbase);
4019         z_algr(Rdst, Rsrc);
4020       }
4021       if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
4022     } else {
4023       add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
4024     }
4025     if (oop_shift != 0) {   // Shift out alignment bits.
4026       z_srlg(Rdst, Rdst, oop_shift);
4027     }
4028     if (!only32bitValid) {
4029       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4030     }
4031   }
4032 #ifdef ASSERT
4033   if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
4034   if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
4035 #endif
4036   BLOCK_COMMENT("} cOop encoder general");
4037 }
4038 
4039 //-------------------------------------------------
4040 // Decode compressed oop. Generally usable decoder.
4041 //-------------------------------------------------
4042 // Rsrc - contains compressed oop on entry.
4043 // Rdst - contains regular oop on exit.
4044 // Rdst and Rsrc may indicate same register.
4045 // If Rbase was preloaded (before the call), Rdst must not be the same register as Rbase.
4046 // Otherwise, Rdst can be the same register as Rbase; then, either Z_R0 or Z_R1 must be available as scratch.
4047 // Rbase - register to use for the base
4048 // pow2_offset - offset of base to nice value. If -1, base must be loaded.
4049 // For performance, it is good to
4050 //  - avoid Z_R0 for any of the argument registers.
4051 //  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
4052 //  - avoid Z_R1 for Rdst if Rdst == Rbase.
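     //
     // Conceptually (ignoring register constraints), the decoding performed below is:
     //   Rdst = (Rsrc == 0) ? NULL : (Rsrc << oop_shift) + oop_base;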
4053 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {
4054 
4055   const address oop_base  = CompressedOops::base();
4056   const int     oop_shift = CompressedOops::shift();
4057   const bool    disjoint  = CompressedOops::base_disjoint();
4058 
4059   assert(UseCompressedOops, "must be on to call this method");
4060   assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
4061   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
4062          "cOop encoder detected bad shift");
4063 
4064   // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.
4065 
4066   if (oop_base != NULL) {
4067     unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
4068     unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
4069     unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
4070     if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
4071       BLOCK_COMMENT("cOop decoder disjointBase {");
4072       // We do not need to load the base. Instead, we can install the upper bits
4073       // with an OR instead of an ADD.
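           // (With a disjoint base, the non-zero bits of oop_base lie strictly above
           //  the bits occupied by the shifted narrow oop, so OR and ADD yield the
           //  same result and no carry can occur.)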
4074       Label done;
4075 
4076       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4077       if (maybeNULL) {  // NULL ptr must be preserved!
4078         z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4079         z_bre(done);
4080       } else {
4081         z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4082       }
4083       if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
4084         z_oihf(Rdst, oop_base_hf);
4085       } else if (oop_base_hl != 0) {
4086         z_oihl(Rdst, oop_base_hl);
4087       } else {
4088         assert(oop_base_hh != 0, "not heapbased mode");
4089         z_oihh(Rdst, oop_base_hh);
4090       }
4091       bind(done);
4092       BLOCK_COMMENT("} cOop decoder disjointBase");
4093     } else {
4094       BLOCK_COMMENT("cOop decoder general {");
4095       // There are three decode steps:
4096       //   scale oop offset (shift left)
4097       //   get base (in reg) and pow2_offset (constant)
4098       //   add base, pow2_offset, and oop offset
4099       // The following register overlap situations may exist:
4100       // Rdst == Rsrc,  Rbase any other
4101       //   not a problem. Scaling in-place leaves Rbase undisturbed.
4102       //   Loading Rbase does not impact the scaled offset.
4103       // Rdst == Rbase, Rsrc  any other
4104       //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
4105       //   would destroy the scaled offset.
4106       //   Remedy: use Rdst_tmp if Rbase has been preloaded.
4107       //           use Rbase_tmp if base has to be loaded.
4108       // Rsrc == Rbase, Rdst  any other
4109       //   Only possible without preloaded Rbase.
4110       //   Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.
4111       // Rsrc == Rbase, Rdst == Rbase
4112       //   Only possible without preloaded Rbase.
4113       //   Loading Rbase would destroy compressed oop. Scaling in-place is ok.
4114       //   Remedy: use Rbase_tmp.
4115       //
4116       Label    done;
4117       Register Rdst_tmp       = Rdst;
4118       Register Rbase_tmp      = Rbase;
4119       bool     used_R0        = false;
4120       bool     used_R1        = false;
4121       bool     base_preloaded = pow2_offset >= 0;
4122       guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
4123       assert(oop_shift != 0, "room for optimization");
4124 
4125       // Check if we need to use scratch registers.
4126       if (Rdst == Rbase) {
4127         assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
4128         if (Rdst != Rsrc) {
4129           if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4130           else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4131         } else {
4132           Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
4133         }
4134       }
4135       if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
4136 
4137       // Scale oop and check for NULL.
4138       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4139       if (maybeNULL) {  // NULL ptr must be preserved!
4140         z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4141         z_bre(done);
4142       } else {
4143         z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4144       }
4145 
4146       // Get oop_base components.
4147       if (!base_preloaded) {
4148         pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
4149       }
4150 
4151       // Add up all components.
4152       if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
4153         z_algr(Rdst_tmp, Rbase_tmp);
4154         if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
4155       } else {
4156         add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
4157       }
4158 
4159       bind(done);
4160       lgr_if_needed(Rdst, Rdst_tmp);
4161 #ifdef ASSERT
4162       if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
4163       if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
4164 #endif
4165       BLOCK_COMMENT("} cOop decoder general");
4166     }
4167   } else {
4168     BLOCK_COMMENT("cOop decoder zeroBase {");
4169     if (oop_shift == 0) {
4170       lgr_if_needed(Rdst, Rsrc);
4171     } else {
4172       z_sllg(Rdst, Rsrc, oop_shift);
4173     }
4174     BLOCK_COMMENT("} cOop decoder zeroBase");
4175   }
4176 }
4177 
4178 // ((OopHandle)result).resolve();
4179 void MacroAssembler::resolve_oop_handle(Register result) {
4180   // OopHandle::resolve is an indirection.
4181   z_lg(result, 0, result);
4182 }
4183 
4184 void MacroAssembler::load_mirror_from_const_method(Register mirror, Register const_method) {
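       // Pointer chase (sketch): mirror = const_method->constants()->pool_holder()->java_mirror(),
       // where the mirror is held in an OopHandle that is resolved below.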
4185   mem2reg_opt(mirror, Address(const_method, ConstMethod::constants_offset()));
4186   mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
4187   mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
4188   resolve_oop_handle(mirror);
4189 }
4190 
4191 void MacroAssembler::load_method_holder(Register holder, Register method) {
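       // Pointer chase (sketch): holder = method->constants()->pool_holder(),
       // i.e. the Klass that holds the method's constant pool.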
4192   mem2reg_opt(holder, Address(method, Method::const_offset()));
4193   mem2reg_opt(holder, Address(holder, ConstMethod::constants_offset()));
4194   mem2reg_opt(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes()));
4195 }
4196 
4197 //---------------------------------------------------------------
4198 //---  Operations on arrays.
4199 //---------------------------------------------------------------
4200 
4201 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4202 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4203 // work registers anyway.
4204 // Actually, only r0, r1 (which are work registers), and odd_tmp_reg are killed.
4205 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg) {
4206 
4207   int      block_start = offset();
4208   Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
4209   Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
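       // Note: MVCLE operates on even/odd register pairs; the even register holds the
       // address and the odd register the length (destination pair Z_R0/Z_R1 here).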
4210 
4211   Label doXC, doMVCLE, done;
4212 
4213   BLOCK_COMMENT("Clear_Array {");
4214 
4215   // Check for zero len and convert to long.
4216   z_ltgfr(odd_tmp_reg, cnt_arg);
4217   z_bre(done);                    // Nothing to do if len == 0.
4218 
4219   // Prefetch data to be cleared.
4220   if (VM_Version::has_Prefetch()) {
4221     z_pfd(0x02,   0, Z_R0, base_pointer_arg);
4222     z_pfd(0x02, 256, Z_R0, base_pointer_arg);
4223   }
4224 
4225   z_sllg(dst_len, odd_tmp_reg, 3); // #bytes to clear.
4226   z_cghi(odd_tmp_reg, 32);         // Check for len <= 256 bytes (<=32 DW).
4227   z_brnh(doXC);                    // If so, use executed XC to clear.
4228 
4229   // MVCLE: initialize long arrays (general case).
4230   bind(doMVCLE);
4231   z_lgr(dst_addr, base_pointer_arg);
4232   // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
4233   // The even register of the register pair is not killed.
4234   clear_reg(odd_tmp_reg, true, false);
4235   MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding()-1), 0);
4236   z_bru(done);
4237 
4238   // XC: initialize short arrays.
4239   Label XC_template; // Instr template, never exec directly!
4240     bind(XC_template);
4241     z_xc(0,0,base_pointer_arg,0,base_pointer_arg);
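         // The EX/EXRL in the doXC path below executes this XC remotely, supplying the
         // length field (#bytes-1) from the low byte of dst_len. The template itself is
         // branched around and never executed inline.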
4242 
4243   bind(doXC);
4244     add2reg(dst_len, -1);               // Get #bytes-1 for EXECUTE.
4245     if (VM_Version::has_ExecuteExtensions()) {
4246       z_exrl(dst_len, XC_template);     // Execute XC with var. len.
4247     } else {
4248       z_larl(odd_tmp_reg, XC_template);
4249       z_ex(dst_len,0,Z_R0,odd_tmp_reg); // Execute XC with var. len.
4250     }
4251     // z_bru(done);      // fallthru
4252 
4253   bind(done);
4254 
4255   BLOCK_COMMENT("} Clear_Array");
4256 
4257   int block_end = offset();
4258   return block_end - block_start;
4259 }
4260 
4261 // Compiler ensures base is doubleword aligned and cnt is count of doublewords.
4262 // Emitter does not KILL any arguments nor work registers.
4263 // Emitter generates up to 16 XC instructions, depending on the array length.
4264 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
4265   int  block_start    = offset();
4266   int  off;
4267   int  lineSize_Bytes = AllocatePrefetchStepSize;
4268   int  lineSize_DW    = AllocatePrefetchStepSize>>LogBytesPerWord;
4269   bool doPrefetch     = VM_Version::has_Prefetch();
4270   int  XC_maxlen      = 256;
4271   int  numXCInstr     = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;
4272 
4273   BLOCK_COMMENT("Clear_Array_Const {");
4274   assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");
4275 
4276   // Do less prefetching for very short arrays.
4277   if (numXCInstr > 0) {
4278     // Prefetch only some cache lines, then begin clearing.
4279     if (doPrefetch) {
4280       if (cnt*BytesPerWord <= lineSize_Bytes/4) {  // If less than 1/4 of a cache line to clear,
4281         z_pfd(0x02, 0, Z_R0, base);                // prefetch just the first cache line.
4282       } else {
4283         assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
4284         for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {
4285           z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);
4286         }
4287       }
4288     }
4289 
4290     for (off=0; off<(numXCInstr-1); off++) {
4291       z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);
4292 
4293       // Prefetch some cache lines in advance.
4294       if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {
4295         z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);
4296       }
4297     }
4298     if (off*XC_maxlen < cnt*BytesPerWord) {
4299       z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);
4300     }
4301   }
4302   BLOCK_COMMENT("} Clear_Array_Const");
4303 
4304   int block_end = offset();
4305   return block_end - block_start;
4306 }
4307 
4308 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4309 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4310 // work registers anyway.
4311 // Actually, only r0, r1, (which are work registers) and odd_tmp_reg are killed.
4312 //
4313 // For very large arrays, exploit MVCLE H/W support.
4314 // MVCLE instruction automatically exploits H/W-optimized page mover.
4315 // - Bytes up to next page boundary are cleared with a series of XC to self.
4316 // - All full pages are cleared with the page mover H/W assist.
4317 // - Remaining bytes are again cleared by a series of XC to self.
4318 //
4319 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg) {
4320 
4321   int      block_start = offset();
4322   Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
4323   Register dst_addr = Z_R0;      // Holds dst addr for MVCLE.
4324 
4325   BLOCK_COMMENT("Clear_Array_Const_Big {");
4326 
4327   // Get len to clear.
4328   load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8
4329 
4330   // Prepare other args to MVCLE.
4331   z_lgr(dst_addr, base_pointer_arg);
4332   // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
4333   // The even register of the register pair is not killed.
4334   (void) clear_reg(odd_tmp_reg, true, false);  // Src len of MVCLE is zero.
4335   MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding() - 1), 0);
4336   BLOCK_COMMENT("} Clear_Array_Const_Big");
4337 
4338   int block_end = offset();
4339   return block_end - block_start;
4340 }
4341 
4342 // Allocator.
4343 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
4344                                                            Register cnt_reg,
4345                                                            Register tmp1_reg, Register tmp2_reg) {
4346   // Tmp1 is oddReg.
4347   // Tmp2 is evenReg.
4348 
4349   int block_start = offset();
4350   Label doMVC, doMVCLE, done, MVC_template;
4351 
4352   BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");
4353 
4354   // Check for zero len and convert to long.
4355   z_ltgfr(cnt_reg, cnt_reg);      // Sign-extend len; the value is reused for the length check below.
4356   z_bre(done);                    // Nothing to do if len == 0.
4357 
4358   z_sllg(Z_R1, cnt_reg, 3);       // Dst len in bytes. calc early to have the result ready.
4359 
4360   z_cghi(cnt_reg, 32);            // Check for len <= 256 bytes (<=32 DW).
4361   z_brnh(doMVC);                  // If so, use executed MVC to copy.
4362 
4363   bind(doMVCLE);                  // A lot of data (more than 256 bytes).
4364   // Prep dest reg pair.
4365   z_lgr(Z_R0, dst_reg);           // dst addr
4366   // Dst len already in Z_R1.
4367   // Prep src reg pair.
4368   z_lgr(tmp2_reg, src_reg);       // src addr
4369   z_lgr(tmp1_reg, Z_R1);          // Src len same as dst len.
4370 
4371   // Do the copy.
4372   move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
4373   z_bru(done);                         // All done.
4374 
4375   bind(MVC_template);             // Just some data (not more than 256 bytes).
4376   z_mvc(0, 0, dst_reg, 0, src_reg);
4377 
4378   bind(doMVC);
4379 
4380   if (VM_Version::has_ExecuteExtensions()) {
4381     add2reg(Z_R1, -1);
4382   } else {
4383     add2reg(tmp1_reg, -1, Z_R1);
4384     z_larl(Z_R1, MVC_template);
4385   }
4386 
4387   if (VM_Version::has_Prefetch()) {
4388     z_pfd(1,  0,Z_R0,src_reg);
4389     z_pfd(2,  0,Z_R0,dst_reg);
4390     //    z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
4391     //    z_pfd(2,256,Z_R0,dst_reg);
4392   }
4393 
4394   if (VM_Version::has_ExecuteExtensions()) {
4395     z_exrl(Z_R1, MVC_template);
4396   } else {
4397     z_ex(tmp1_reg, 0, Z_R0, Z_R1);
4398   }
4399 
4400   bind(done);
4401 
4402   BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");
4403 
4404   int block_end = offset();
4405   return block_end - block_start;
4406 }
4407 
4408 //-------------------------------------------------
4409 //   Constants (scalar and oop) in constant pool
4410 //-------------------------------------------------
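     //
     // The offsets returned by store_const_in_toc() and store_oop_in_toc() are
     // relative to code()->consts()->start(); a return value of -1 indicates that
     // no constant pool entry could be allocated.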
4411 
4412 // Add a non-relocated constant to the CP.
4413 int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
4414   long    value  = val.value();
4415   address tocPos = long_constant(value);
4416 
4417   if (tocPos != NULL) {
4418     int tocOffset = (int)(tocPos - code()->consts()->start());
4419     return tocOffset;
4420   }
4421   // long_constant() returned NULL, so no constant entry has been created.
4422   // In that case, we return a "fatal" offset, in case the subsequently
4423   // generated access code is ever executed.
4424   return -1;
4425 }
4426 
4427 // Returns the TOC offset where the address is stored.
4428 // Add a relocated constant to the CP.
4429 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
4430   // Use RelocationHolder::none for the constant pool entry.
4431   // Otherwise we will end up with a failing NativeCall::verify(x),
4432   // where x is the address of the constant pool entry.
4433   address tocPos = address_constant((address)oop.value(), RelocationHolder::none);
4434 
4435   if (tocPos != NULL) {
4436     int              tocOffset = (int)(tocPos - code()->consts()->start());
4437     RelocationHolder rsp = oop.rspec();
4438     Relocation      *rel = rsp.reloc();
4439 
4440     // Store toc_offset in relocation, used by call_far_patchable.
4441     if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
4442       ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
4443     }
4444     // Relocate at the load's pc.
4445     relocate(rsp);
4446 
4447     return tocOffset;
4448   }
4449   // address_constant() returned NULL, so no constant entry has been created.
4450   // In that case, we return a "fatal" offset, in case the subsequently
4451   // generated access code is ever executed.
4452   return -1;
4453 }
4454 
4455 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
4456   int     tocOffset = store_const_in_toc(a);
4457   if (tocOffset == -1) return false;
4458   address tocPos    = tocOffset + code()->consts()->start();
4459   assert((address)code()->consts()->start() != NULL, "Please add CP address");
4460   relocate(a.rspec());
4461   load_long_pcrelative(dst, tocPos);
4462   return true;
4463 }
4464 
4465 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
4466   int     tocOffset = store_oop_in_toc(a);
4467   if (tocOffset == -1) return false;
4468   address tocPos    = tocOffset + code()->consts()->start();
4469   assert((address)code()->consts()->start() != NULL, "Please add CP address");
4470 
4471   load_addr_pcrelative(dst, tocPos);
4472   return true;
4473 }
4474 
4475 // If the instruction sequence at the given pc is a load_const_from_toc
4476 // sequence, return the value currently stored at the referenced position
4477 // in the TOC.
4478 intptr_t MacroAssembler::get_const_from_toc(address pc) {
4479 
4480   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
4481 
4482   long    offset  = get_load_const_from_toc_offset(pc);
4483   address dataLoc = NULL;
4484   if (is_load_const_from_toc_pcrelative(pc)) {
4485     dataLoc = pc + offset;
4486   } else {
4487     CodeBlob* cb = CodeCache::find_blob(pc);
4488     assert(cb && cb->is_nmethod(), "sanity");
4489     nmethod* nm = (nmethod*)cb;
4490     dataLoc = nm->ctable_begin() + offset;
4491   }
4492   return *(intptr_t *)dataLoc;
4493 }
4494 
4495 // If the instruction sequence at the given pc is a load_const_from_toc
4496 // sequence, copy the passed-in new_data value into the referenced
4497 // position in the TOC.
4498 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
4499   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
4500 
4501   long    offset = MacroAssembler::get_load_const_from_toc_offset(pc);
4502   address dataLoc = NULL;
4503   if (is_load_const_from_toc_pcrelative(pc)) {
4504     dataLoc = pc+offset;
4505   } else {
4506     nmethod* nm = CodeCache::find_nmethod(pc);
4507     assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
4508     dataLoc = nm->ctable_begin() + offset;
4509   }
4510   if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
4511     *(unsigned long *)dataLoc = new_data;
4512   }
4513 }
4514 
4515 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc
4516 // site. Verify by calling is_load_const_from_toc() before!!
4517 // Offset is +/- 2**32 -> use long.
4518 long MacroAssembler::get_load_const_from_toc_offset(address a) {
4519   assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
4520   //  expected code sequence:
4521   //    z_lgrl(t, simm32);    len = 6
4522   unsigned long inst;
4523   unsigned int  len = get_instruction(a, &inst);
4524   return get_pcrel_offset(inst);
4525 }
4526 
4527 //**********************************************************************************
4528 //  inspection of generated instruction sequences for a particular pattern
4529 //**********************************************************************************
4530 
4531 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
4532 #ifdef ASSERT
4533   unsigned long inst;
4534   unsigned int  len = get_instruction(a+2, &inst);
4535   if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
4536     const int range = 128;
4537     Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
4538     VM_Version::z_SIGSEGV();
4539   }
4540 #endif
4541   // expected code sequence:
4542   //   z_lgrl(t, relAddr32);    len = 6
4543   //TODO: verify accessed data is in CP, if possible.
4544   return is_load_pcrelative_long(a);  // TODO: might be too general. Currently, only lgrl is used.
4545 }
4546 
4547 bool MacroAssembler::is_load_const_from_toc_call(address a) {
4548   return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
4549 }
4550 
4551 bool MacroAssembler::is_load_const_call(address a) {
4552   return is_load_const(a) && is_call_byregister(a + load_const_size());
4553 }
4554 
4555 //-------------------------------------------------
4556 //   Emitters for some really CISC instructions
4557 //-------------------------------------------------
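     //
     // These instructions are interruptible: the CPU may process only part of the
     // operands and end with condition code 3, in which case the instruction must
     // simply be re-executed. All emitters below therefore use the same retry
     // pattern, sketched here for MVCLE:
     //
     //   Label retry;
     //   bind(retry);
     //   Assembler::z_mvcle(dst, src, pad);
     //   Assembler::z_brc(Assembler::bcondOverflow, retry);  // CC==3: not done yet, iterate.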
4558 
4559 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
4560   assert(dst->encoding()%2==0, "must be an even/odd register pair");
4561   assert(src->encoding()%2==0, "must be an even/odd register pair");
4562   assert(pad<256, "must be a padding BYTE");
4563 
4564   Label retry;
4565   bind(retry);
4566   Assembler::z_mvcle(dst, src, pad);
4567   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4568 }
4569 
4570 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
4571   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
4572   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
4573   assert(pad<256, "must be a padding BYTE");
4574 
4575   Label retry;
4576   bind(retry);
4577   Assembler::z_clcle(left, right, pad, Z_R0);
4578   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4579 }
4580 
4581 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
4582   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
4583   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
4584   assert(pad<=0xfff, "must be a padding HALFWORD");
4585   assert(VM_Version::has_ETF2(), "instruction must be available");
4586 
4587   Label retry;
4588   bind(retry);
4589   Assembler::z_clclu(left, right, pad, Z_R0);
4590   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4591 }
4592 
4593 void MacroAssembler::search_string(Register end, Register start) {
4594   assert(end->encoding() != 0, "end address must not be in R0");
4595   assert(start->encoding() != 0, "start address must not be in R0");
4596 
4597   Label retry;
4598   bind(retry);
4599   Assembler::z_srst(end, start);
4600   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4601 }
4602 
4603 void MacroAssembler::search_string_uni(Register end, Register start) {
4604   assert(end->encoding() != 0, "end address must not be in R0");
4605   assert(start->encoding() != 0, "start address must not be in R0");
4606   assert(VM_Version::has_ETF3(), "instruction must be available");
4607 
4608   Label retry;
4609   bind(retry);
4610   Assembler::z_srstu(end, start);
4611   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4612 }
4613 
4614 void MacroAssembler::kmac(Register srcBuff) {
4615   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4616   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
4617 
4618   Label retry;
4619   bind(retry);
4620   Assembler::z_kmac(Z_R0, srcBuff);
4621   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4622 }
4623 
4624 void MacroAssembler::kimd(Register srcBuff) {
4625   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4626   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
4627 
4628   Label retry;
4629   bind(retry);
4630   Assembler::z_kimd(Z_R0, srcBuff);
4631   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4632 }
4633 
4634 void MacroAssembler::klmd(Register srcBuff) {
4635   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4636   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
4637 
4638   Label retry;
4639   bind(retry);
4640   Assembler::z_klmd(Z_R0, srcBuff);
4641   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4642 }
4643 
4644 void MacroAssembler::km(Register dstBuff, Register srcBuff) {
4645   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
4646   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
4647   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4648   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
4649   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4650 
4651   Label retry;
4652   bind(retry);
4653   Assembler::z_km(dstBuff, srcBuff);
4654   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4655 }
4656 
4657 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {
4658   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
4659   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
4660   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4661   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
4662   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4663 
4664   Label retry;
4665   bind(retry);
4666   Assembler::z_kmc(dstBuff, srcBuff);
4667   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4668 }
4669 
4670 void MacroAssembler::kmctr(Register dstBuff, Register ctrBuff, Register srcBuff) {
4671   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
4672   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
4673   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4674   assert(dstBuff->encoding()     != 0, "dst buffer address can't be in Z_R0");
4675   assert(ctrBuff->encoding()     != 0, "ctr buffer address can't be in Z_R0");
4676   assert(ctrBuff->encoding() % 2 == 0, "ctr buffer addr must be an even register");
4677   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
4678   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4679 
4680   Label retry;
4681   bind(retry);
4682   Assembler::z_kmctr(dstBuff, ctrBuff, srcBuff);
4683   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4684 }
4685 
4686 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {
4687   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4688 
4689   Label retry;
4690   bind(retry);
4691   Assembler::z_cksm(crcBuff, srcBuff);
4692   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4693 }
4694 
4695 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {
4696   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4697   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4698 
4699   Label retry;
4700   bind(retry);
4701   Assembler::z_troo(r1, r2, m3);
4702   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4703 }
4704 
4705 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {
4706   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4707   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4708 
4709   Label retry;
4710   bind(retry);
4711   Assembler::z_trot(r1, r2, m3);
4712   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4713 }
4714 
4715 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
4716   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4717   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4718 
4719   Label retry;
4720   bind(retry);
4721   Assembler::z_trto(r1, r2, m3);
4722   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4723 }
4724 
4725 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
4726   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4727   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4728 
4729   Label retry;
4730   bind(retry);
4731   Assembler::z_trtt(r1, r2, m3);
4732   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4733 }
4734 
4735 //---------------------------------------
4736 // Helpers for Intrinsic Emitters
4737 //---------------------------------------
4738 
4739 /**
4740  * uint32_t crc;
4741  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
4742  */
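     // Note: each CRC table entry is a 4-byte constant, so the table index is the
     // selected byte value shifted left by 2 (see the rotate_then_insert() calls below).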
4743 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
4744   assert_different_registers(crc, table, tmp);
4745   assert_different_registers(val, table);
4746   if (crc == val) {      // Must rotate first to use the unmodified value.
4747     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
4748     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
4749   } else {
4750     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
4751     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
4752   }
4753   z_x(crc, Address(table, tmp, 0));
4754 }
4755 
4756 /**
4757  * uint32_t crc;
4758  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
4759  */
4760 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
4761   fold_byte_crc32(crc, crc, table, tmp);
4762 }
4763 
4764 /**
4765  * Emits code to update CRC-32 with a byte value according to constants in table.
4766  *
4767  * @param [in,out]crc Register containing the crc.
4768  * @param [in]val     Register containing the byte to fold into the CRC.
4769  * @param [in]table   Register containing the table of crc constants.
4770  *
4771  * uint32_t crc;
4772  * val = crc_table[(val ^ crc) & 0xFF];
4773  * crc = val ^ (crc >> 8);
4774  */
4775 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
4776   z_xr(val, crc);
4777   fold_byte_crc32(crc, val, table, val);
4778 }
4779 
4780 
4781 /**
4782  * @param crc   register containing existing CRC (32-bit)
4783  * @param buf   register pointing to input byte buffer (byte*)
4784  * @param len   register containing number of bytes
4785  * @param table register pointing to CRC table
4786  */
4787 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
4788   assert_different_registers(crc, buf, len, table, data);
4789 
4790   Label L_mainLoop, L_done;
4791   const int mainLoop_stepping = 1;
4792 
4793   // Process all bytes in a single-byte loop.
4794   z_ltr(len, len);
4795   z_brnh(L_done);
4796 
4797   bind(L_mainLoop);
4798     z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
4799     add2reg(buf, mainLoop_stepping);        // Advance buffer position.
4800     update_byte_crc32(crc, data, table);
4801     z_brct(len, L_mainLoop);                // Iterate.
4802 
4803   bind(L_done);
4804 }
4805 
4806 /**
4807  * Emits code to update CRC-32 with a 4-byte value according to constants in table.
4808  * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
4809  *
4810  */
4811 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
4812                                         Register t0,  Register t1,  Register t2,    Register t3) {
4813   // This is what we implement (the DOBIG4 part):
4814   //
4815   // #define DOBIG4 c ^= *++buf4; \
4816   //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
4817   //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
4818   // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
4819   // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
4820   const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
4821   const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
4822   const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
4823   const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
4824 
4825   // XOR crc with next four bytes of buffer.
4826   lgr_if_needed(t0, crc);
4827   z_x(t0, Address(buf, bufDisp));
4828   if (bufInc != 0) {
4829     add2reg(buf, bufInc);
4830   }
4831 
4832   // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
4833   rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
4834   rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
4835   rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
4836   rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2
4837 
4838   // XOR indexed table values to calculate updated crc.
4839   z_ly(t2, Address(table, t2, (intptr_t)ix1));
4840   z_ly(t0, Address(table, t0, (intptr_t)ix3));
4841   z_xy(t2, Address(table, t3, (intptr_t)ix0));
4842   z_xy(t0, Address(table, t1, (intptr_t)ix2));
4843   z_xr(t0, t2);           // Now t0 contains the updated CRC value.
4844   lgr_if_needed(crc, t0);
4845 }
4846 
4847 /**
4848  * @param crc   register containing existing CRC (32-bit)
4849  * @param buf   register pointing to input byte buffer (byte*)
4850  * @param len   register containing number of bytes
4851  * @param table register pointing to CRC table
4852  *
4853  * Uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller!
4854  */
4855 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
4856                                         Register t0,  Register t1,  Register t2,  Register t3,
4857                                         bool invertCRC) {
4858   assert_different_registers(crc, buf, len, table);
4859 
4860   Label L_mainLoop, L_tail;
4861   Register  data = t0;
4862   Register  ctr  = Z_R0;
4863   const int mainLoop_stepping = 4;
4864   const int log_stepping      = exact_log2(mainLoop_stepping);
4865 
4866   // Don't test for len <= 0 here. This pathological case should not occur anyway.
4867   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
4868   // The situation itself is detected and handled correctly by the length checks
4869   // guarding the main loop and the tail loop.
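       //
       // The main loop uses the zlib "DOBIG4" slicing-by-4 scheme (see
       // update_1word_crc32()): the crc is byte-reversed up front so that the
       // big-endian 4-byte loads match the table layout (columns 4..7), and is
       // reversed back after the loop.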
4870 
4871   if (invertCRC) {
4872     not_(crc, noreg, false);           // 1s complement of crc
4873   }
4874 
4875   // Check for short (<4 bytes) buffer.
4876   z_srag(ctr, len, log_stepping);
4877   z_brnh(L_tail);
4878 
4879   z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
4880   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
4881 
4882   BIND(L_mainLoop);
4883     update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
4884     z_brct(ctr, L_mainLoop); // Iterate.
4885 
4886   z_lrvr(crc, crc);          // Reverse byte order back to original.
4887 
4888   // Process last few (<4) bytes of buffer.
4889   BIND(L_tail);
4890   update_byteLoop_crc32(crc, buf, len, table, data);
4891 
4892   if (invertCRC) {
4893     not_(crc, noreg, false);           // 1s complement of crc
4894   }
4895 }
4896 
4897 /**
4898  * @param crc   register containing existing CRC (32-bit)
4899  * @param buf   register pointing to input byte buffer (byte*)
4900  * @param len   register containing number of bytes
4901  * @param table register pointing to CRC table
4902  */
4903 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
4904                                         Register t0,  Register t1,  Register t2,  Register t3,
4905                                         bool invertCRC) {
4906   assert_different_registers(crc, buf, len, table);
4907   Register data = t0;
4908 
4909   if (invertCRC) {
4910     not_(crc, noreg, false);           // 1s complement of crc
4911   }
4912 
4913   update_byteLoop_crc32(crc, buf, len, table, data);
4914 
4915   if (invertCRC) {
4916     not_(crc, noreg, false);           // 1s complement of crc
4917   }
4918 }
4919 
4920 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
4921                                              bool invertCRC) {
4922   assert_different_registers(crc, buf, len, table, tmp);
4923 
4924   if (invertCRC) {
4925     not_(crc, noreg, false);           // 1s complement of crc
4926   }
4927 
4928   z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
4929   update_byte_crc32(crc, tmp, table);
4930 
4931   if (invertCRC) {
4932     not_(crc, noreg, false);           // 1s complement of crc
4933   }
4934 }
4935 
4936 void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
4937                                                 bool invertCRC) {
4938   assert_different_registers(crc, val, table);
4939 
4940   if (invertCRC) {
4941     not_(crc, noreg, false);           // 1s complement of crc
4942   }
4943 
4944   update_byte_crc32(crc, val, table);
4945 
4946   if (invertCRC) {
4947     not_(crc, noreg, false);           // 1s complement of crc
4948   }
4949 }
4950 
4951 //
4952 // Code for BigInteger::multiplyToLen() intrinsic.
4953 //
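     // Note on MLGR usage below: z_mlgr(product, y) multiplies the odd register of
     // the even/odd pair (the multiplicand, product->successor()) by y and leaves
     // the 128-bit result with the high half in 'product' (even) and the low half
     // in the odd successor.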
4954 
4955 // dest_lo += src1 + src2
4956 // dest_hi += carry1 + carry2
4957 // Z_R7 is destroyed !
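     //
     // In effect, the 128-bit value dest_hi:dest_lo is incremented by src1 and then
     // by src2, with ALCGR propagating each carry into dest_hi (Z_R7 merely supplies
     // the zero operand for the add-with-carry).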
4958 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
4959                                      Register src1, Register src2) {
4960   clear_reg(Z_R7);
4961   z_algr(dest_lo, src1);
4962   z_alcgr(dest_hi, Z_R7);
4963   z_algr(dest_lo, src2);
4964   z_alcgr(dest_hi, Z_R7);
4965 }
4966 
4967 // Multiply 64 bit by 64 bit first loop.
4968 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
4969                                            Register x_xstart,
4970                                            Register y, Register y_idx,
4971                                            Register z,
4972                                            Register carry,
4973                                            Register product,
4974                                            Register idx, Register kdx) {
4975   // jlong carry, x[], y[], z[];
4976   // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
4977   //   huge_128 product = y[idx] * x[xstart] + carry;
4978   //   z[kdx] = (jlong)product;
4979   //   carry  = (jlong)(product >>> 64);
4980   // }
4981   // z[xstart] = carry;
4982 
4983   Label L_first_loop, L_first_loop_exit;
4984   Label L_one_x, L_one_y, L_multiply;
4985 
4986   z_aghi(xstart, -1);
4987   z_brl(L_one_x);   // Special case: length of x is 1.
4988 
4989   // Load next two integers of x.
4990   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
4991   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
4992 
4993 
4994   bind(L_first_loop);
4995 
4996   z_aghi(idx, -1);
4997   z_brl(L_first_loop_exit);
4998   z_aghi(idx, -1);
4999   z_brl(L_one_y);
5000 
5001   // Load next two integers of y.
5002   z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
5003   mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));
5004 
5005 
5006   bind(L_multiply);
5007 
5008   Register multiplicand = product->successor();
5009   Register product_low = multiplicand;
5010 
5011   lgr_if_needed(multiplicand, x_xstart);
5012   z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
5013   clear_reg(Z_R7);
5014   z_algr(product_low, carry); // Add carry to result.
5015   z_alcgr(product, Z_R7);     // Add carry of the last addition.
5016   add2reg(kdx, -2);
5017 
5018   // Store result.
5019   z_sllg(Z_R7, kdx, LogBytesPerInt);
5020   reg2mem_opt(product_low, Address(z, Z_R7, 0));
5021   lgr_if_needed(carry, product);
5022   z_bru(L_first_loop);
5023 
5024 
5025   bind(L_one_y); // Load one 32 bit portion of y as (0,value).
5026 
5027   clear_reg(y_idx);
5028   mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);
5029   z_bru(L_multiply);
5030 
5031 
5032   bind(L_one_x); // Load one 32 bit portion of x as (0,value).
5033 
5034   clear_reg(x_xstart);
5035   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
5036   z_bru(L_first_loop);
5037 
5038   bind(L_first_loop_exit);
5039 }
5040 
5041 // Multiply 64 bit by 64 bit and add 128 bit.
5042 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
5043                                             Register z,
5044                                             Register yz_idx, Register idx,
5045                                             Register carry, Register product,
5046                                             int offset) {
5047   // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
5048   // z[kdx] = (jlong)product;
5049 
5050   Register multiplicand = product->successor();
5051   Register product_low = multiplicand;
5052 
5053   z_sllg(Z_R7, idx, LogBytesPerInt);
5054   mem2reg_opt(yz_idx, Address(y, Z_R7, offset));
5055 
5056   lgr_if_needed(multiplicand, x_xstart);
5057   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
5058   mem2reg_opt(yz_idx, Address(z, Z_R7, offset));
5059 
5060   add2_with_carry(product, product_low, carry, yz_idx);
5061 
5062   z_sllg(Z_R7, idx, LogBytesPerInt);
5063   reg2mem_opt(product_low, Address(z, Z_R7, offset));
5064 
5065 }
5066 
5067 // Multiply 128 bit by 128 bit. Unrolled inner loop.
5068 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
5069                                              Register y, Register z,
5070                                              Register yz_idx, Register idx,
5071                                              Register jdx,
5072                                              Register carry, Register product,
5073                                              Register carry2) {
5074   // jlong carry, x[], y[], z[];
5075   // int kdx = ystart+1;
5076   // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
5077   //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
5078   //   z[kdx+idx+1] = (jlong)product;
5079   //   jlong carry2 = (jlong)(product >>> 64);
5080   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
5081   //   z[kdx+idx] = (jlong)product;
5082   //   carry = (jlong)(product >>> 64);
5083   // }
5084   // idx += 2;
5085   // if (idx > 0) {
5086   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
5087   //   z[kdx+idx] = (jlong)product;
5088   //   carry = (jlong)(product >>> 64);
5089   // }
5090 
5091   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
5092 
5093   // scale the index
5094   lgr_if_needed(jdx, idx);
5095   and_imm(jdx, 0xfffffffffffffffcL);
5096   rshift(jdx, 2);
5097 
5098 
5099   bind(L_third_loop);
5100 
5101   z_aghi(jdx, -1);
5102   z_brl(L_third_loop_exit);
5103   add2reg(idx, -4);
5104 
5105   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
5106   lgr_if_needed(carry2, product);
5107 
5108   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
5109   lgr_if_needed(carry, product);
5110   z_bru(L_third_loop);
5111 
5112 
5113   bind(L_third_loop_exit);  // Handle any left-over operand parts.
5114 
5115   and_imm(idx, 0x3);
5116   z_brz(L_post_third_loop_done);
5117 
5118   Label L_check_1;
5119 
5120   z_aghi(idx, -2);
5121   z_brl(L_check_1);
5122 
5123   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
5124   lgr_if_needed(carry, product);
5125 
5126 
5127   bind(L_check_1);
5128 
5129   add2reg(idx, 0x2);
5130   and_imm(idx, 0x1);
5131   z_aghi(idx, -1);
5132   z_brl(L_post_third_loop_done);
5133 
5134   Register   multiplicand = product->successor();
5135   Register   product_low = multiplicand;
5136 
5137   z_sllg(Z_R7, idx, LogBytesPerInt);
5138   clear_reg(yz_idx);
5139   mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
5140   lgr_if_needed(multiplicand, x_xstart);
5141   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
5142   clear_reg(yz_idx);
5143   mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);
5144 
5145   add2_with_carry(product, product_low, yz_idx, carry);
5146 
5147   z_sllg(Z_R7, idx, LogBytesPerInt);
5148   reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
5149   rshift(product_low, 32);
5150 
5151   lshift(product, 32);
5152   z_ogr(product_low, product);
5153   lgr_if_needed(carry, product_low);
5154 
5155   bind(L_post_third_loop_done);
5156 }
5157 
5158 void MacroAssembler::multiply_to_len(Register x, Register xlen,
5159                                      Register y, Register ylen,
5160                                      Register z,
5161                                      Register tmp1, Register tmp2,
5162                                      Register tmp3, Register tmp4,
5163                                      Register tmp5) {
5164   ShortBranchVerifier sbv(this);
5165 
5166   assert_different_registers(x, xlen, y, ylen, z,
5167                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
5168   assert_different_registers(x, xlen, y, ylen, z,
5169                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);
5170 
5171   z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
5172 
5173   // In OpenJDK, the zlen argument is passed in its stack slot as a 32-bit value.
5174   Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.
5175 
5176   const Register idx = tmp1;
5177   const Register kdx = tmp2;
5178   const Register xstart = tmp3;
5179 
5180   const Register y_idx = tmp4;
5181   const Register carry = tmp5;
5182   const Register product  = Z_R0_scratch;
5183   const Register x_xstart = Z_R8;
5184 
5185   // First Loop.
5186   //
5187   //   final static long LONG_MASK = 0xffffffffL;
5188   //   int xstart = xlen - 1;
5189   //   int ystart = ylen - 1;
5190   //   long carry = 0;
5191   //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
5192   //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
5193   //     z[kdx] = (int)product;
5194   //     carry = product >>> 32;
5195   //   }
5196   //   z[xstart] = (int)carry;
5197   //
5198 
5199   lgr_if_needed(idx, ylen);  // idx = ylen
5200   z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
5201   clear_reg(carry);          // carry = 0
5202 
5203   Label L_done;
5204 
5205   lgr_if_needed(xstart, xlen);
5206   z_aghi(xstart, -1);
5207   z_brl(L_done);
5208 
5209   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
5210 
5211   NearLabel L_second_loop;
5212   compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);
5213 
5214   NearLabel L_carry;
5215   z_aghi(kdx, -1);
5216   z_brz(L_carry);
5217 
5218   // Store lower 32 bits of carry.
5219   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
5220   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5221   rshift(carry, 32);
5222   z_aghi(kdx, -1);
5223 
5224 
5225   bind(L_carry);
5226 
5227   // Store upper 32 bits of carry.
5228   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
5229   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5230 
5231   // Second and third (nested) loops.
5232   //
5233   // for (int i = xstart-1; i >= 0; i--) { // Second loop
5234   //   carry = 0;
5235   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
5236   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
5237   //                    (z[k] & LONG_MASK) + carry;
5238   //     z[k] = (int)product;
5239   //     carry = product >>> 32;
5240   //   }
5241   //   z[i] = (int)carry;
5242   // }
5243   //
5244   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = x_xstart
5245 
5246   const Register jdx = tmp1;
5247 
5248   bind(L_second_loop);
5249 
5250   clear_reg(carry);           // carry = 0;
5251   lgr_if_needed(jdx, ylen);   // j = ystart+1
5252 
5253   z_aghi(xstart, -1);         // i = xstart-1;
5254   z_brl(L_done);
5255 
5256   // Use free slots in the current stackframe instead of push/pop.
5257   Address zsave(Z_SP, _z_abi(carg_1));
5258   reg2mem_opt(z, zsave);
5259 
5260 
5261   Label L_last_x;
5262 
5263   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
5264   load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j
5265   z_aghi(xstart, -1);                           // i = xstart-1;
5266   z_brl(L_last_x);
5267 
5268   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
5269   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
5270 
5271 
5272   Label L_third_loop_prologue;
5273 
5274   bind(L_third_loop_prologue);
5275 
5276   Address xsave(Z_SP, _z_abi(carg_2));
5277   Address xlensave(Z_SP, _z_abi(carg_3));
5278   Address ylensave(Z_SP, _z_abi(carg_4));
5279 
5280   reg2mem_opt(x, xsave);
5281   reg2mem_opt(xstart, xlensave);
5282   reg2mem_opt(ylen, ylensave);
5283 
5284 
5285   multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
5286 
5287   mem2reg_opt(z, zsave);
5288   mem2reg_opt(x, xsave);
5289   mem2reg_opt(xlen, xlensave);   // This is the decrement of the loop counter!
5290   mem2reg_opt(ylen, ylensave);
5291 
5292   add2reg(tmp3, 1, xlen);
5293   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
5294   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5295   z_aghi(tmp3, -1);
5296   z_brl(L_done);
5297 
5298   rshift(carry, 32);
5299   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
5300   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5301   z_bru(L_second_loop);
5302 
5303   // Next infrequent code is moved outside loops.
5304   bind(L_last_x);
5305 
5306   clear_reg(x_xstart);
5307   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
5308   z_bru(L_third_loop_prologue);
5309 
5310   bind(L_done);
5311 
5312   z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
5313 }
5314 
5315 #ifndef PRODUCT
5316 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
5317 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
5318   Label ok;
5319   if (check_equal) {
5320     z_bre(ok);
5321   } else {
5322     z_brne(ok);
5323   }
5324   stop(msg, id);
5325   bind(ok);
5326 }
5327 
5328 // Assert if CC indicates "low".
5329 void MacroAssembler::asm_assert_low(const char *msg, int id) {
5330   Label ok;
5331   z_brnl(ok);
5332   stop(msg, id);
5333   bind(ok);
5334 }
5335 
5336 // Assert if CC indicates "high".
5337 void MacroAssembler::asm_assert_high(const char *msg, int id) {
5338   Label ok;
5339   z_brnh(ok);
5340   stop(msg, id);
5341   bind(ok);
5342 }
5343 
5344 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
5345 // Generates non-relocatable code.
5346 void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
5347   Label ok;
5348   if (check_equal) { z_bre(ok); }
5349   else             { z_brne(ok); }
5350   stop_static(msg, id);
5351   bind(ok);
5352 }
5353 
5354 void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
5355                                           Register mem_base, const char* msg, int id) {
5356   switch (size) {
5357     case 4:
5358       load_and_test_int(Z_R0, Address(mem_base, mem_offset));
5359       break;
5360     case 8:
5361       load_and_test_long(Z_R0,  Address(mem_base, mem_offset));
5362       break;
5363     default:
5364       ShouldNotReachHere();
5365   }
5366   if (allow_relocation) { asm_assert(check_equal, msg, id); }
5367   else                  { asm_assert_static(check_equal, msg, id); }
5368 }
5369 
5370 // Check the condition
5371 //   expected_size == FP - SP
5372 // after transformation:
5373 //   expected_size - FP + SP == 0
5374 // Destroys Register expected_size if no tmp register is passed.
5375 void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
5376   if (tmp == noreg) {
5377     tmp = expected_size;
5378   } else {
5379     if (tmp != expected_size) {
5380       z_lgr(tmp, expected_size);
5381     }
5382   }
5383   z_algr(tmp, Z_SP);
5384   z_slg(tmp, 0, Z_R0, Z_SP);
5385   asm_assert_eq(msg, id);
5386 }
5387 #endif // !PRODUCT
5388 
5389 void MacroAssembler::verify_thread() {
5390   if (VerifyThread) {
5391     unimplemented("", 117);
5392   }
5393 }
5394 
5395 // Save and restore functions: Exclude Z_R0.
5396 void MacroAssembler::save_volatile_regs(Register dst, int offset, bool include_fp, bool include_flags) {
5397   z_stmg(Z_R1, Z_R5, offset, dst); offset += 5 * BytesPerWord;
5398   if (include_fp) {
5399     z_std(Z_F0, Address(dst, offset)); offset += BytesPerWord;
5400     z_std(Z_F1, Address(dst, offset)); offset += BytesPerWord;
5401     z_std(Z_F2, Address(dst, offset)); offset += BytesPerWord;
5402     z_std(Z_F3, Address(dst, offset)); offset += BytesPerWord;
5403     z_std(Z_F4, Address(dst, offset)); offset += BytesPerWord;
5404     z_std(Z_F5, Address(dst, offset)); offset += BytesPerWord;
5405     z_std(Z_F6, Address(dst, offset)); offset += BytesPerWord;
5406     z_std(Z_F7, Address(dst, offset)); offset += BytesPerWord;
5407   }
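       // The condition code is saved by materializing it as a distinguishing byte
       // (2 = equal, 4 = higher, 1 = lower); restore_volatile_regs rebuilds it by
       // comparing that byte against 2 (z_cli), see below.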
5408   if (include_flags) {
5409     Label done;
5410     z_mvi(Address(dst, offset), 2); // encoding: equal
5411     z_bre(done);
5412     z_mvi(Address(dst, offset), 4); // encoding: higher
5413     z_brh(done);
5414     z_mvi(Address(dst, offset), 1); // encoding: lower
5415     bind(done);
5416   }
5417 }
5418 void MacroAssembler::restore_volatile_regs(Register src, int offset, bool include_fp, bool include_flags) {
5419   z_lmg(Z_R1, Z_R5, offset, src); offset += 5 * BytesPerWord;
5420   if (include_fp) {
5421     z_ld(Z_F0, Address(src, offset)); offset += BytesPerWord;
5422     z_ld(Z_F1, Address(src, offset)); offset += BytesPerWord;
5423     z_ld(Z_F2, Address(src, offset)); offset += BytesPerWord;
5424     z_ld(Z_F3, Address(src, offset)); offset += BytesPerWord;
5425     z_ld(Z_F4, Address(src, offset)); offset += BytesPerWord;
5426     z_ld(Z_F5, Address(src, offset)); offset += BytesPerWord;
5427     z_ld(Z_F6, Address(src, offset)); offset += BytesPerWord;
5428     z_ld(Z_F7, Address(src, offset)); offset += BytesPerWord;
5429   }
5430   if (include_flags) {
5431     z_cli(Address(src, offset), 2); // Rebuild CC from the byte saved above: 2 -> equal, 4 -> high, 1 -> low.
5432   }
5433 }
5434 
5435 // Plausibility check for oops.
5436 void MacroAssembler::verify_oop(Register oop, const char* msg) {
5437   if (!VerifyOops) return;
5438 
5439   BLOCK_COMMENT("verify_oop {");
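       // Space for 5 GPRs (Z_R1..Z_R5), 8 FPRs (Z_F0..Z_F7) and one word for the
       // flags byte (see save_volatile_regs above).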
5440   unsigned int nbytes_save = (5 + 8 + 1) * BytesPerWord;
5441   address entry_addr = StubRoutines::verify_oop_subroutine_entry_address();
5442 
5443   save_return_pc();
5444 
5445   // Push frame, but preserve flags
5446   z_lgr(Z_R0, Z_SP);
5447   z_lay(Z_SP, -((int64_t)nbytes_save + frame::z_abi_160_size), Z_SP);
5448   z_stg(Z_R0, _z_abi(callers_sp), Z_SP);
5449 
5450   save_volatile_regs(Z_SP, frame::z_abi_160_size, true, true);
5451 
5452   lgr_if_needed(Z_ARG2, oop);
5453   load_const_optimized(Z_ARG1, (address)msg);
5454   load_const_optimized(Z_R1, entry_addr);
5455   z_lg(Z_R1, 0, Z_R1);
5456   call_c(Z_R1);
5457 
5458   restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, true);
5459   pop_frame();
5460   restore_return_pc();
5461 
5462   BLOCK_COMMENT("} verify_oop ");
5463 }
5464 
5465 void MacroAssembler::verify_oop_addr(Address addr, const char* msg) {
5466   if (!VerifyOops) return;
5467 
5468   BLOCK_COMMENT("verify_oop_addr {");
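       // Space for 5 GPRs and 8 FPRs only; the flags are not saved here (include_flags == false).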
5469   unsigned int nbytes_save = (5 + 8) * BytesPerWord;
5470   address entry_addr = StubRoutines::verify_oop_subroutine_entry_address();
5471 
5472   save_return_pc();
5473   unsigned int frame_size = push_frame_abi160(nbytes_save); // kills Z_R0
5474   save_volatile_regs(Z_SP, frame::z_abi_160_size, true, false);
5475 
5476   z_lg(Z_ARG2, addr.plus_disp(frame_size));
5477   load_const_optimized(Z_ARG1, (address)msg);
5478   load_const_optimized(Z_R1, entry_addr);
5479   z_lg(Z_R1, 0, Z_R1);
5480   call_c(Z_R1);
5481 
5482   restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, false);
5483   pop_frame();
5484   restore_return_pc();
5485 
5486   BLOCK_COMMENT("} verify_oop_addr ");
5487 }
5488 
5489 const char* MacroAssembler::stop_types[] = {
5490   "stop",
5491   "untested",
5492   "unimplemented",
5493   "shouldnotreachhere"
5494 };
5495 
5496 static void stop_on_request(const char* tp, const char* msg) {
5497   tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
5498   guarantee(false, "Z assembly code requires stop: %s", msg);
5499 }
5500 
5501 void MacroAssembler::stop(int type, const char* msg, int id) {
5502   BLOCK_COMMENT(err_msg("stop: %s {", msg));
5503 
5504   // Setup arguments.
5505   load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
5506   load_const(Z_ARG2, (void*) msg);
5507   get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
5508   save_return_pc();  // Saves return pc Z_R14.
5509   push_frame_abi160(0);
5510   call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
5511   // The plain disassembler does not recognize illtrap. It instead displays
5512   // a 32-bit value. Issuing two illtraps assures the disassembler finds
5513   // the proper beginning of the next instruction.
5514   z_illtrap(); // Illegal instruction.
5515   z_illtrap(); // Illegal instruction.
5516 
5517   BLOCK_COMMENT(" } stop");
5518 }
5519 
5520 // Special version of stop() for code size reduction.
5521 // Reuses the previously generated call sequence, if any.
5522 // Generates the call sequence on its own, if necessary.
5523 // Note: This code will work only in non-relocatable code!
5524 //       The relative address of the data elements (arg1, arg2) must not change.
5525 //       The reentry point must not move relative to its users. This prerequisite
5526 //       should be given for "hand-written" code, if all chain calls are in the same code blob.
5527 //       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
5528 address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
5529   BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));
5530 
5531   // Setup arguments.
5532   if (allow_relocation) {
5533     // Relocatable version (for comparison purposes). Remove after some time.
5534     load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
5535     load_const(Z_ARG2, (void*) msg);
5536   } else {
5537     load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
5538     load_absolute_address(Z_ARG2, (address)msg);
5539   }
5540   if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
5541     BLOCK_COMMENT("branch to reentry point:");
5542     z_brc(bcondAlways, reentry);
5543   } else {
5544     BLOCK_COMMENT("reentry point:");
5545     reentry = pc();      // Re-entry point for subsequent stop calls.
5546     save_return_pc();    // Saves return pc Z_R14.
5547     push_frame_abi160(0);
5548     if (allow_relocation) {
5549       reentry = NULL;    // Prevent reentry if code relocation is allowed.
5550       call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
5551     } else {
5552       call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
5553     }
5554     z_illtrap(); // Illegal instruction as emergency stop, should the above call return.
5555   }
5556   BLOCK_COMMENT(" } stop_chain");
5557 
5558   return reentry;
5559 }
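
     // Chaining example (illustrative): the first call, stop_chain(NULL, ...), emits the full
     // call sequence and returns its reentry pc; passing that pc to subsequent stop_chain
     // calls lets them emit only the argument setup plus a short branch to the shared sequence,
     // which is how the code size reduction mentioned above is achieved.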
5560 
5561 // Special version of stop() for code size reduction.
5562 // Assumes constant relative addresses for data and runtime call.
5563 void MacroAssembler::stop_static(int type, const char* msg, int id) {
5564   stop_chain(NULL, type, msg, id, false);
5565 }
5566 
5567 void MacroAssembler::stop_subroutine() {
5568   unimplemented("stop_subroutine", 710);
5569 }
5570 
5571 // Prints msg to stdout from within generated code.
5572 void MacroAssembler::warn(const char* msg) {
5573   RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
5574   load_absolute_address(Z_R1, (address) warning);
5575   load_absolute_address(Z_ARG1, (address) msg);
5576   (void) call(Z_R1);
5577   RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
5578 }
5579 
5580 #ifndef PRODUCT
5581 
5582 // Write pattern 0x0101010101010101 to the region [low - before*BytesPerWord, high + after*BytesPerWord].
5583 void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
5584   if (!ZapEmptyStackFields) return;
5585   BLOCK_COMMENT("zap memory region {");
5586   load_const_optimized(val, 0x0101010101010101);
5587   int size = before + after;
5588   if (low == high && size < 5 && size > 0) {
5589     int offset = -before*BytesPerWord;
5590     for (int i = 0; i < size; ++i) {
5591       z_stg(val, Address(low, offset));
5592       offset += BytesPerWord;
5593     }
5594   } else {
5595     add2reg(addr, -before*BytesPerWord, low);
5596     if (after) {
5597 #ifdef ASSERT
5598       jlong check = after * BytesPerWord;
5599       assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
5600 #endif
5601       add2reg(high, after * BytesPerWord);
5602     }
5603     NearLabel loop;
5604     bind(loop);
5605     z_stg(val, Address(addr));
5606     add2reg(addr, 8);
5607     compare64_and_branch(addr, high, bcondNotHigh, loop);
5608     if (after) {
5609       add2reg(high, -after * BytesPerWord);
5610     }
5611   }
5612   BLOCK_COMMENT("} zap memory region");
5613 }
5614 #endif // !PRODUCT
5615 
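     // SkipIfEqual is a scoped helper: the constructor emits a load-and-test of a native
     // bool flag plus a branch that skips the code generated between constructor and
     // destructor whenever the flag equals 'value'; the destructor binds the skip target.
     // Illustrative use (the flag name below is only an example):
     //
     //   {
     //     SkipIfEqual skip(masm, &SomeDiagnosticFlag, false, Z_R1_scratch);
     //     // code emitted here is executed at runtime only if the flag is true
     //   }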
5616 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
5617   _masm = masm;
5618   _masm->load_absolute_address(_rscratch, (address)flag_addr);
5619   _masm->load_and_test_int(_rscratch, Address(_rscratch));
5620   if (value) {
5621     _masm->z_brne(_label); // Skip if true, i.e. != 0.
5622   } else {
5623     _masm->z_bre(_label);  // Skip if false, i.e. == 0.
5624   }
5625 }
5626 
5627 SkipIfEqual::~SkipIfEqual() {
5628   _masm->bind(_label);
5629 }