/*
 * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2020 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Major contributions by AHa, AS, JL, ML.

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interp_masm_s390.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

// Implementation of InterpreterMacroAssembler.
// This file specializes the assembler with interpreter-specific macros.

#ifdef PRODUCT
#define BLOCK_COMMENT(str)
#define BIND(label)        bind(label);
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")
#endif

void InterpreterMacroAssembler::jump_to_entry(address entry, Register Rscratch) {
  assert(entry != NULL, "Entry must have been generated by now");
  assert(Rscratch != Z_R0, "Can't use R0 for addressing");
  branch_optimized(Assembler::bcondAlways, entry);
}

void InterpreterMacroAssembler::empty_expression_stack(void) {
  get_monitors(Z_R1_scratch);
  add2reg(Z_esp, -Interpreter::stackElementSize, Z_R1_scratch);
}

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  // On z/Architecture we are short on registers, therefore we do not preload the
  // dispatch address of the next bytecode.
}

// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr, bool generate_poll) {
  z_llgc(Z_bytecode, bcp_incr, Z_R0, Z_bcp);  // Load next bytecode.
  add2reg(Z_bcp, bcp_incr);                   // Advance bcp. Add2reg produces optimal code.
  dispatch_base(state, Interpreter::dispatch_table(state), generate_poll);
}

// Common code for dispatch and dispatch_only.
// Dispatch the value in Z_bytecode and increment Z_bcp.

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table, bool generate_poll) {
  verify_FPU(1, state);

#ifdef ASSERT
  address reentry = NULL;
  { Label OK;
    // Check if the frame pointer in Z_fp is correct.
    z_cg(Z_fp, 0, Z_SP);
    z_bre(OK);
    reentry = stop_chain_static(reentry, "invalid frame pointer Z_fp: " FILE_AND_LINE);
    bind(OK);
  }
  { Label OK;
    // Check if the locals pointer in Z_locals is correct.
    z_cg(Z_locals, _z_ijava_state_neg(locals), Z_fp);
    z_bre(OK);
    reentry = stop_chain_static(reentry, "invalid locals pointer Z_locals: " FILE_AND_LINE);
    bind(OK);
  }
#endif

  // TODO: Maybe implement +VerifyActivationFrameSize here.
  // verify_thread(); // Too slow. We will just verify on method entry & exit.
  verify_oop(Z_tos, state);

  // Dispatch table to use.
  load_absolute_address(Z_tmp_1, (address)table);  // Z_tmp_1 = table;

  if (generate_poll) {
    address *sfpt_tbl = Interpreter::safept_table(state);
    if (table != sfpt_tbl) {
      Label dispatch;
      const Address poll_byte_addr(Z_thread, in_bytes(JavaThread::polling_word_offset()) + 7 /* Big Endian */);
      // An armed page has the poll_bit set; if the poll bit is cleared, just continue.
      z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
      z_braz(dispatch);
      load_absolute_address(Z_tmp_1, (address)sfpt_tbl);  // Z_tmp_1 = safepoint table;
      bind(dispatch);
    }
  }

  // 0 <= Z_bytecode < 256 => Use a 32 bit shift, because it is shorter than sllg.
  // Z_bytecode must have been loaded zero-extended for this approach to be correct.
  z_sll(Z_bytecode, LogBytesPerWord, Z_R0);   // Multiply by wordSize.
  z_lg(Z_tmp_1, 0, Z_bytecode, Z_tmp_1);      // Get entry addr.

  z_br(Z_tmp_1);
}
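
// A C-like sketch of the dispatch performed above (the table holds one entry
// address per bytecode value; poll_bit_is_armed() stands in for the z_tm test
// on the thread's polling word):
//   address* t = table;
//   if (generate_poll && poll_bit_is_armed()) t = Interpreter::safept_table(state);
//   goto *t[Z_bytecode];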

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, Interpreter::dispatch_table(state), generate_poll);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address *table) {
  // Load current bytecode.
  z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t)0));
  dispatch_base(state, table);
}

// The following call_VM*_base() methods overload and mask the respective
// declarations/definitions in class MacroAssembler. They are meant as a "detour"
// to perform additional, template interpreter specific tasks before actually
// calling their MacroAssembler counterparts.

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point) {
  bool allow_relocation = true; // Generally valid variant. Assume code is relocated.
  // interpreter specific
  // Note: No need to save/restore bcp (Z_R13) pointer since these are callee
  // saved registers and no blocking/GC can happen in leaf calls.

  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, allow_relocation);
}

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
  // interpreter specific
  // Note: No need to save/restore bcp (Z_R13) pointer since these are callee
  // saved registers and no blocking/GC can happen in leaf calls.

  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, allow_relocation);
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result, Register last_java_sp,
                                             address entry_point, bool check_exceptions) {
  bool allow_relocation = true; // Generally valid variant. Assume code is relocated.
  // interpreter specific

  save_bcp();
  save_esp();
  // super call
  MacroAssembler::call_VM_base(oop_result, last_java_sp,
                               entry_point, allow_relocation, check_exceptions);
  restore_bcp();
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result, Register last_java_sp,
                                             address entry_point, bool allow_relocation,
                                             bool check_exceptions) {
  // interpreter specific

  save_bcp();
  save_esp();
  // super call
  MacroAssembler::call_VM_base(oop_result, last_java_sp,
                               entry_point, allow_relocation, check_exceptions);
  restore_bcp();
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    BLOCK_COMMENT("check_and_handle_popframe {");
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed. If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // TODO: Check if all four state combinations could be visible.
    // If (processing and !pending) is an invisible/impossible state,
    // there is optimization potential by testing both bits at once.
    // Then, All_Zeroes and All_Ones means skip, Mixed means doit.
    testbit(Address(Z_thread, JavaThread::popframe_condition_offset()),
            exact_log2(JavaThread::popframe_pending_bit));
    z_bfalse(L);
    testbit(Address(Z_thread, JavaThread::popframe_condition_offset()),
            exact_log2(JavaThread::popframe_processing_bit));
    z_btrue(L);

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    // The above call should (as its only effect) return the contents of the field
    // _remove_activation_preserving_args_entry in Z_RET.
    // We just jump there to have the work done.
    z_br(Z_RET);
    // There is no way for control to fall thru here.

    bind(L);
    BLOCK_COMMENT("} check_and_handle_popframe");
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register RjvmtiState = Z_R1_scratch;
  int      tos_off     = in_bytes(JvmtiThreadState::earlyret_tos_offset());
  int      oop_off     = in_bytes(JvmtiThreadState::earlyret_oop_offset());
  int      val_off     = in_bytes(JvmtiThreadState::earlyret_value_offset());
  int      state_off   = in_bytes(JavaThread::jvmti_thread_state_offset());

  z_lg(RjvmtiState, state_off, Z_thread);

  switch (state) {
    case atos: z_lg(Z_tos, oop_off, RjvmtiState);
               store_const(Address(RjvmtiState, oop_off), 0L, 8, 8, Z_R0_scratch);
               break;
    case ltos: z_lg(Z_tos, val_off, RjvmtiState);   break;
    case btos: // fall through
    case ztos: // fall through
    case ctos: // fall through
    case stos: // fall through
    case itos: z_llgf(Z_tos, val_off, RjvmtiState); break;
    case ftos: z_le(Z_ftos, val_off, RjvmtiState);  break;
    case dtos: z_ld(Z_ftos, val_off, RjvmtiState);  break;
    case vtos:   /* nothing to do */                break;
    default  : ShouldNotReachHere();
  }

  // Clean up tos value in the jvmti thread state.
  store_const(Address(RjvmtiState, val_off),   0L, 8, 8, Z_R0_scratch);
  // Set tos state field to illegal value.
  store_const(Address(RjvmtiState, tos_off), ilgl, 4, 1, Z_R0_scratch);
}
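
// A C-like sketch of load_earlyret_value (using the offsets computed above):
//   JvmtiThreadState* ts = thread->jvmti_thread_state();
//   tos value = ts->_earlyret_value (or ts->_earlyret_oop for atos);
//   ts->_earlyret_value = 0;  ts->_earlyret_oop = NULL;  // clean up
//   ts->_earlyret_tos = ilgl;                            // invalidate tos state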

void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    BLOCK_COMMENT("check_and_handle_earlyret {");
    Label L;
    // Arg regs are saved, because we are just behind the call in call_VM_base.
    Register jvmti_thread_state = Z_ARG2;
    Register tmp                = Z_ARG3;
    load_and_test_long(jvmti_thread_state, Address(Z_thread, JavaThread::jvmti_thread_state_offset()));
    z_bre(L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.

    assert((JvmtiThreadState::earlyret_pending != 0) && (JvmtiThreadState::earlyret_inactive == 0),
           "must fix this check, when changing the values of the earlyret enum");
    assert(JvmtiThreadState::earlyret_pending == 1, "must fix this check, when changing the values of the earlyret enum");

    load_and_test_int(tmp, Address(jvmti_thread_state, JvmtiThreadState::earlyret_state_offset()));
    z_brz(L); // if (thread->jvmti_thread_state()->_earlyret_state != JvmtiThreadState::earlyret_pending) exit;

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    assert(sizeof(TosState) == 4, "unexpected size");
    z_l(Z_ARG1, Address(jvmti_thread_state, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Z_ARG1);
    // The above call should (as its only effect) return the contents of the field
    // _remove_activation_early_entry in Z_RET.
    // We just jump there to have the work done.
    z_br(Z_RET);
    // There is no way for control to fall thru here.

    bind(L);
    BLOCK_COMMENT("} check_and_handle_earlyret");
  }
}

void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  lgr_if_needed(Z_ARG2, arg_2);
  MacroAssembler::call_VM_leaf_base(entry_point, true);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size) {
  Address param(Z_bcp, bcp_offset);

  BLOCK_COMMENT("get_cache_index_at_bcp {");
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_sized_value(index, param, 2, false /*signed*/);
  } else if (index_size == sizeof(u4)) {

    load_sized_value(index, param, 4, false);

    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    not_(index);  // Convert to plain index.
  } else if (index_size == sizeof(u1)) {
    z_llgc(index, param);
  } else {
    ShouldNotReachHere();
  }
  BLOCK_COMMENT("}");
}


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register cpe_offset,
                                                           int bcp_offset, size_t index_size) {
  BLOCK_COMMENT("get_cache_and_index_at_bcp {");
  assert_different_registers(cache, cpe_offset);
  get_cache_index_at_bcp(cpe_offset, bcp_offset, index_size);
  z_lg(cache, Address(Z_fp, _z_ijava_state_neg(cpoolCache)));
  // Convert from field index to ConstantPoolCache offset in bytes.
  z_sllg(cpe_offset, cpe_offset, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord));
  BLOCK_COMMENT("}");
}
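
// A worked example for the shift above, assuming an entry size of 4 words
// (32 bytes on this 64-bit platform): a field index of 3 becomes
//   cpe_offset = 3 << log2(32) = 96 bytes
// relative to the first ConstantPoolCacheEntry.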

// Kills Z_R0_scratch.
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register cpe_offset,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  BLOCK_COMMENT("get_cache_and_index_and_bytecode_at_bcp {");
  get_cache_and_index_at_bcp(cache, cpe_offset, bcp_offset, index_size);

  // We want to load (from CP cache) the bytecode that corresponds to the passed-in byte_no.
  // It is located at (cache + cpe_offset + base_offset + indices_offset + (8-1) - (byte_no+1)),
  // i.e. at the last byte of the DW, minus (byte_no+1) bytes.
  // Instead of loading, shifting and masking a DW, we just load that one byte of interest with z_llgc (unsigned).
  const int base_ix_off = in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset());
  const int off_in_DW   = (8-1) - (1+byte_no);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  assert(ConstantPoolCacheEntry::bytecode_1_mask == 0xff, "");
  load_sized_value(bytecode, Address(cache, cpe_offset, base_ix_off+off_in_DW), 1, false /*signed*/);

  BLOCK_COMMENT("}");
}
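
// A worked example for the byte selection above (s390 is big-endian): for
// byte_no == 1, off_in_DW = (8-1) - (1+1) = 5, i.e. the sixth byte of the
// indices double word; for byte_no == 2 it is byte 4. Because the masks
// asserted above are 0xff, the single unsigned byte load is equivalent to
// loading, shifting and masking the whole DW.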

// Load object from cpool->resolved_references(index).
void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
  assert_different_registers(result, index);
  get_constant_pool(result);

  // Convert
  //  - from field index to resolved_references() index and
  //  - from word index to byte offset.
  // Since this is a java object, it is potentially compressed.
  Register tmp = index;  // reuse
  z_sllg(index, index, LogBytesPerHeapOop); // Offset into resolved references array.
  // Load pointer for resolved_references[] objArray.
  z_lg(result, ConstantPool::cache_offset_in_bytes(), result);
  z_lg(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
  resolve_oop_handle(result); // Load resolved references array itself.
#ifdef ASSERT
  NearLabel index_ok;
  z_lgf(Z_R0, Address(result, arrayOopDesc::length_offset_in_bytes()));
  z_sllg(Z_R0, Z_R0, LogBytesPerHeapOop);
  compare64_and_branch(tmp, Z_R0, Assembler::bcondLow, index_ok);
  stop("resolved reference index out of bounds", 0x09256);
  bind(index_ok);
#endif
  z_agr(result, index);    // Address of indexed array element.
  load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp, noreg);
}

// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register cpool, Register offset, Register iklass) {
  // int value = *(Rcpool->int_at_addr(which));
  // int resolved_klass_index = extract_low_short_from_int(value);
  z_llgh(offset, Address(cpool, offset, sizeof(ConstantPool) + 2)); // offset = resolved_klass_index (s390 is big-endian)
  z_sllg(offset, offset, LogBytesPerWord);                          // Convert 'index' to 'offset'
  z_lg(iklass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // iklass = cpool->_resolved_klasses
  z_lg(iklass, Address(iklass, offset, Array<Klass*>::base_offset_in_bytes()));
}
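
// The same lookup in C++ terms, a sketch continuing the two commented lines
// inside the function above:
//   int value = *(cpool->int_at_addr(which));
//   int resolved_klass_index = extract_low_short_from_int(value);
//   Klass* iklass = cpool->resolved_klasses()->at(resolved_klass_index);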

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  BLOCK_COMMENT("get_cache_entry_pointer_at_bcp {");
    get_cache_and_index_at_bcp(cache, tmp, bcp_offset, index_size);
    add2reg_with_index(cache, in_bytes(ConstantPoolCache::base_offset()), tmp, cache);
  BLOCK_COMMENT("}");
}

void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
                                                              Register cache,
                                                              Register cpe_offset,
                                                              Register method) {
  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == TemplateTable::f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));

  z_lg(method, Address(cache, cpe_offset, method_offset)); // get f1 or f2 Method*
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Label &ok_is_subtype) {
  // Profile the not-null value's klass.
  profile_typecheck(Rtmp1, Rsub_klass, Rtmp2);

  // Do the check.
  check_klass_subtype(Rsub_klass, Rsuper_klass, Rtmp1, Rtmp2, ok_is_subtype);

  // Profile the failure of the check.
  profile_typecheck_failed(Rtmp1, Rtmp2);
}

// Pop topmost element from stack. It just disappears.
// Useful if consumed previously by access via stackTop().
void InterpreterMacroAssembler::popx(int len) {
  add2reg(Z_esp, len*Interpreter::stackElementSize);
  debug_only(verify_esp(Z_esp, Z_R1_scratch));
}

// Get Address object of stack top. No checks. No pop.
// Purpose: - Provide address of stack operand to exploit reg-mem operations.
//          - Avoid RISC-like mem2reg - reg-reg-op sequence.
Address InterpreterMacroAssembler::stackTop() {
  return Address(Z_esp, Interpreter::expr_offset_in_bytes(0));
}

void InterpreterMacroAssembler::pop_i(Register r) {
  z_l(r, Interpreter::expr_offset_in_bytes(0), Z_esp);
  add2reg(Z_esp, Interpreter::stackElementSize);
  assert_different_registers(r, Z_R1_scratch);
  debug_only(verify_esp(Z_esp, Z_R1_scratch));
}

void InterpreterMacroAssembler::pop_ptr(Register r) {
  z_lg(r, Interpreter::expr_offset_in_bytes(0), Z_esp);
  add2reg(Z_esp, Interpreter::stackElementSize);
  assert_different_registers(r, Z_R1_scratch);
  debug_only(verify_esp(Z_esp, Z_R1_scratch));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  z_lg(r, Interpreter::expr_offset_in_bytes(0), Z_esp);
  add2reg(Z_esp, 2*Interpreter::stackElementSize);
  assert_different_registers(r, Z_R1_scratch);
  debug_only(verify_esp(Z_esp, Z_R1_scratch));
}

void InterpreterMacroAssembler::pop_f(FloatRegister f) {
  mem2freg_opt(f, Address(Z_esp, Interpreter::expr_offset_in_bytes(0)), false);
  add2reg(Z_esp, Interpreter::stackElementSize);
  debug_only(verify_esp(Z_esp, Z_R1_scratch));
}

void InterpreterMacroAssembler::pop_d(FloatRegister f) {
  mem2freg_opt(f, Address(Z_esp, Interpreter::expr_offset_in_bytes(0)), true);
  add2reg(Z_esp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Z_esp, Z_R1_scratch));
}

void InterpreterMacroAssembler::push_i(Register r) {
  assert_different_registers(r, Z_R1_scratch);
  debug_only(verify_esp(Z_esp, Z_R1_scratch));
  z_st(r, Address(Z_esp));
  add2reg(Z_esp, -Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  z_stg(r, Address(Z_esp));
  add2reg(Z_esp, -Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_l(Register r) {
  assert_different_registers(r, Z_R1_scratch);
  debug_only(verify_esp(Z_esp, Z_R1_scratch));
  int offset = -Interpreter::stackElementSize;
  z_stg(r, Address(Z_esp, offset));
  clear_mem(Address(Z_esp), Interpreter::stackElementSize);
  add2reg(Z_esp, 2 * offset);
}
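
// What push_l leaves behind, as a sketch (Z_esp points to the first free
// slot, the stack grows toward lower addresses, slot indices are in
// stackElementSize units):
//   esp[-1] = r;   // 64-bit value, becomes the new top-of-stack slot
//   esp[0]  = 0;   // zero-clear the unused half of the long pair
//   esp    -= 2;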

void InterpreterMacroAssembler::push_f(FloatRegister f) {
  debug_only(verify_esp(Z_esp, Z_R1_scratch));
  freg2mem_opt(f, Address(Z_esp), false);
  add2reg(Z_esp, -Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_d(FloatRegister d) {
  debug_only(verify_esp(Z_esp, Z_R1_scratch));
  int offset = -Interpreter::stackElementSize;
  freg2mem_opt(d, Address(Z_esp, offset));
  add2reg(Z_esp, 2 * offset);
}

void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(Z_tos, state);
  switch (state) {
    case atos: push_ptr();           break;
    case btos: push_i();             break;
    case ztos:
    case ctos:
    case stos: push_i();             break;
    case itos: push_i();             break;
    case ltos: push_l();             break;
    case ftos: push_f();             break;
    case dtos: push_d();             break;
    case vtos: /* nothing to do */   break;
    default  : ShouldNotReachHere();
  }
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(Z_tos);       break;
    case btos: pop_i(Z_tos);         break;
    case ztos:
    case ctos:
    case stos: pop_i(Z_tos);         break;
    case itos: pop_i(Z_tos);         break;
    case ltos: pop_l(Z_tos);         break;
    case ftos: pop_f(Z_ftos);        break;
    case dtos: pop_d(Z_ftos);        break;
    case vtos: /* nothing to do */   break;
    default  : ShouldNotReachHere();
  }
  verify_oop(Z_tos, state);
}

// Helpers for swap and dup.
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  z_lg(val, Address(Z_esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  z_stg(val, Address(Z_esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::prepare_to_jump_from_interpreted(Register method) {
  // Satisfy interpreter calling convention (see generate_normal_entry()).
  z_lgr(Z_R10, Z_SP); // Set sender sp (aka initial caller sp, aka unextended sp).
  // Record top_frame_sp, because the callee might modify it, if it's compiled.
  z_stg(Z_SP, _z_ijava_state_neg(top_frame_sp), Z_fp);
  save_bcp();
  save_esp();
  z_lgr(Z_method, method); // Set Z_method (kills Z_fp!).
}

// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry.
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  assert_different_registers(method, Z_R10 /*used for initial_caller_sp*/, temp);
  prepare_to_jump_from_interpreted(method);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    z_lg(Z_R1_scratch, Address(method, Method::from_interpreted_offset()));
    MacroAssembler::load_and_test_int(Z_R0_scratch, Address(Z_thread, JavaThread::interp_only_mode_offset()));
    z_bcr(bcondEqual, Z_R1_scratch); // Run compiled code if zero.
    // Run interpreted.
    z_lg(Z_R1_scratch, Address(method, Method::interpreter_entry_offset()));
    z_br(Z_R1_scratch);
  } else {
    // Run compiled code.
    z_lg(Z_R1_scratch, Address(method, Method::from_interpreted_offset()));
    z_br(Z_R1_scratch);
  }
}
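
// Control flow of jump_from_interpreted, as a sketch:
//   if (JvmtiExport::can_post_interpreter_events() && thread->interp_only_mode != 0)
//     goto method->interpreter_entry();   // stay interpreted (i2i entry)
//   else
//     goto method->from_interpreted();    // possibly compiled code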

#ifdef ASSERT
void InterpreterMacroAssembler::verify_esp(Register Resp, Register Rtemp) {
  // About to read or write Resp[0].
  // Make sure it is not in the monitors or the TOP_IJAVA_FRAME_ABI.
  address reentry = NULL;

  {
    // Check if the frame pointer in Z_fp is correct.
    NearLabel OK;
    z_cg(Z_fp, 0, Z_SP);
    z_bre(OK);
    reentry = stop_chain_static(reentry, "invalid frame pointer Z_fp");
    bind(OK);
  }
  {
    // Resp must not point into or below the operand stack,
    // i.e. IJAVA_STATE.monitors > Resp.
    NearLabel OK;
    Register Rmonitors = Rtemp;
    z_lg(Rmonitors, _z_ijava_state_neg(monitors), Z_fp);
    compareU64_and_branch(Rmonitors, Resp, bcondHigh, OK);
    reentry = stop_chain_static(reentry, "too many pops: Z_esp points into monitor area");
    bind(OK);
  }
  {
    // Resp may point to the last word of TOP_IJAVA_FRAME_ABI, but not below,
    // i.e. !(Z_SP + frame::z_top_ijava_frame_abi_size - Interpreter::stackElementSize > Resp).
    NearLabel OK;
    Register Rabi_bottom = Rtemp;
    add2reg(Rabi_bottom, frame::z_top_ijava_frame_abi_size - Interpreter::stackElementSize, Z_SP);
    compareU64_and_branch(Rabi_bottom, Resp, bcondNotHigh, OK);
    reentry = stop_chain_static(reentry, "too many pushes: Z_esp points into TOP_IJAVA_FRAME_ABI");
    bind(OK);
  }
}
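
// The invariant verify_esp enforces, as a sketch:
//   Z_SP + z_top_ijava_frame_abi_size - stackElementSize <= Resp < IJAVA_STATE.monitors
// A violation on the high end means too many pops (Resp in the monitor area),
// on the low end too many pushes (Resp in TOP_IJAVA_FRAME_ABI).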

void InterpreterMacroAssembler::asm_assert_ijava_state_magic(Register tmp) {
  Label magic_ok;
  load_const_optimized(tmp, frame::z_istate_magic_number);
  z_cg(tmp, Address(Z_fp, _z_ijava_state_neg(magic)));
  z_bre(magic_ok);
  stop_static("error: wrong magic number in ijava_state access");
  bind(magic_ok);
}
#endif // ASSERT

void InterpreterMacroAssembler::save_bcp() {
  z_stg(Z_bcp, Address(Z_fp, _z_ijava_state_neg(bcp)));
  asm_assert_ijava_state_magic(Z_bcp);
  NOT_PRODUCT(z_lg(Z_bcp, Address(Z_fp, _z_ijava_state_neg(bcp))));
}

void InterpreterMacroAssembler::restore_bcp() {
  asm_assert_ijava_state_magic(Z_bcp);
  z_lg(Z_bcp, Address(Z_fp, _z_ijava_state_neg(bcp)));
}

void InterpreterMacroAssembler::save_esp() {
  z_stg(Z_esp, Address(Z_fp, _z_ijava_state_neg(esp)));
}

void InterpreterMacroAssembler::restore_esp() {
  asm_assert_ijava_state_magic(Z_esp);
  z_lg(Z_esp, Address(Z_fp, _z_ijava_state_neg(esp)));
}

void InterpreterMacroAssembler::get_monitors(Register reg) {
  asm_assert_ijava_state_magic(reg);
  mem2reg_opt(reg, Address(Z_fp, _z_ijava_state_neg(monitors)));
}

void InterpreterMacroAssembler::save_monitors(Register reg) {
  reg2mem_opt(reg, Address(Z_fp, _z_ijava_state_neg(monitors)));
}

void InterpreterMacroAssembler::get_mdp(Register mdp) {
  z_lg(mdp, _z_ijava_state_neg(mdx), Z_fp);
}

void InterpreterMacroAssembler::save_mdp(Register mdp) {
  z_stg(mdp, _z_ijava_state_neg(mdx), Z_fp);
}

// Values that are only read (besides initialization).
void InterpreterMacroAssembler::restore_locals() {
  asm_assert_ijava_state_magic(Z_locals);
  z_lg(Z_locals, Address(Z_fp, _z_ijava_state_neg(locals)));
}

void InterpreterMacroAssembler::get_method(Register reg) {
  asm_assert_ijava_state_magic(reg);
  z_lg(reg, Address(Z_fp, _z_ijava_state_neg(method)));
}

void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(Register Rdst, int bcp_offset,
                                                          signedOrNot is_signed) {
  // Rdst is an 8-byte return value!!!

  // Unaligned loads incur only a small penalty on z/Architecture. The penalty
  // is a few (2..3) ticks, even when the load crosses a cache line
  // boundary. In case of a cache miss, the stall could, of course, be
  // much longer.

  switch (is_signed) {
    case Signed:
      z_lgh(Rdst, bcp_offset, Z_R0, Z_bcp);
      break;
    case Unsigned:
      z_llgh(Rdst, bcp_offset, Z_R0, Z_bcp);
      break;
    default:
      ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(Register Rdst, int bcp_offset,
                                                          setCCOrNot set_cc) {
  // Rdst is an 8-byte return value!!!

  // Unaligned loads incur only a small penalty on z/Architecture. The penalty
  // is a few (2..3) ticks, even when the load crosses a cache line
  // boundary. In case of a cache miss, the stall could, of course, be
  // much longer.

  // Both variants implement a sign-extending int2long load.
  if (set_cc == set_CC) {
    load_and_test_int2long(Rdst, Address(Z_bcp, (intptr_t)bcp_offset));
  } else {
    mem2reg_signed_opt(    Rdst, Address(Z_bcp, (intptr_t)bcp_offset));
  }
}

void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_method(Rdst);
  mem2reg_opt(Rdst, Address(Rdst, Method::const_offset()));
  mem2reg_opt(Rdst, Address(Rdst, ConstMethod::constants_offset()));
}

void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  mem2reg_opt(Rtags, Address(Rcpool, ConstantPool::tags_offset_in_bytes()));
}

// Unlock if synchronized method.
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//   If throw_monitor_exception
//     throws IllegalMonitorStateException
//   Else if install_monitor_exception
//     installs IllegalMonitorStateException
//   Else
//     no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  NearLabel unlocked, unlock, no_unlock;

  {
    Register R_method = Z_ARG2;
    Register R_do_not_unlock_if_synchronized = Z_ARG3;

    // Get the value of _do_not_unlock_if_synchronized into R_do_not_unlock_if_synchronized.
    const Address do_not_unlock_if_synchronized(Z_thread,
                                                JavaThread::do_not_unlock_if_synchronized_offset());
    load_sized_value(R_do_not_unlock_if_synchronized, do_not_unlock_if_synchronized, 1, false /*unsigned*/);
    z_mvi(do_not_unlock_if_synchronized, false); // Reset the flag.

    // Check if synchronized method.
    get_method(R_method);
    verify_oop(Z_tos, state);
    push(state); // Save tos/result.
    testbit(method2_(R_method, access_flags), JVM_ACC_SYNCHRONIZED_BIT);
    z_bfalse(unlocked);

    // Don't unlock anything if the _do_not_unlock_if_synchronized flag
    // is set.
    compareU64_and_branch(R_do_not_unlock_if_synchronized, (intptr_t)0L, bcondNotEqual, no_unlock);
  }

  // unlock monitor

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(Z_fp, -(frame::z_ijava_state_size + (int) sizeof(BasicObjectLock)));
  // We use Z_ARG2 so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly.
  load_address(Z_ARG2, monitor); // Address of first monitor.
  z_lg(Z_ARG3, Address(Z_ARG2, BasicObjectLock::obj_offset_in_bytes()));
  compareU64_and_branch(Z_ARG3, (intptr_t)0L, bcondNotEqual, unlock);

  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw an exception.
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    z_bru(unlocked);
  }

  bind(unlock);

  unlock_object(Z_ARG2);

  bind(unlocked);

  // Z_tos and Z_ftos might contain the return value (saved via push(state) above).

  // Check that all monitors are unlocked.
  {
    NearLabel loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    // We use Z_ARG2 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly.
    Register R_current_monitor = Z_ARG2;
    Register R_monitor_block_bot = Z_ARG1;
    const Address monitor_block_top(Z_fp, _z_ijava_state_neg(monitors));
    const Address monitor_block_bot(Z_fp, -frame::z_ijava_state_size);

    bind(restart);
    // Starting with top-most entry.
    z_lg(R_current_monitor, monitor_block_top);
    // Points to word before bottom of monitor block.
    load_address(R_monitor_block_bot, monitor_block_bot);
    z_bru(entry);

    // Entry already locked, need to throw exception.
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception.
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                               throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.
      unlock_object(R_current_monitor);
      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }
      z_bru(restart);
    }

    bind(loop);
    // Check if current entry is used.
    load_and_test_long(Z_R0_scratch, Address(R_current_monitor, BasicObjectLock::obj_offset_in_bytes()));
    z_brne(exception);

    add2reg(R_current_monitor, entry_size); // Otherwise advance to next entry.
    bind(entry);
    compareU64_and_branch(R_current_monitor, R_monitor_block_bot, bcondNotEqual, loop);
  }

  bind(no_unlock);
  pop(state);
  verify_oop(Z_tos, state);
}

void InterpreterMacroAssembler::narrow(Register result, Register ret_type) {
  get_method(ret_type);
  z_lg(ret_type, Address(ret_type, in_bytes(Method::const_offset())));
  z_lb(ret_type, Address(ret_type, in_bytes(ConstMethod::result_type_offset())));

  Label notBool, notByte, notChar, done;

  // Common case first.
  compareU32_and_branch(ret_type, T_INT, bcondEqual, done);

  compareU32_and_branch(ret_type, T_BOOLEAN, bcondNotEqual, notBool);
  z_nilf(result, 0x1);
  z_bru(done);

  bind(notBool);
  compareU32_and_branch(ret_type, T_BYTE, bcondNotEqual, notByte);
  z_lbr(result, result);
  z_bru(done);

  bind(notByte);
  compareU32_and_branch(ret_type, T_CHAR, bcondNotEqual, notChar);
  z_nilf(result, 0xffff);
  z_bru(done);

  bind(notChar);
  // compareU32_and_branch(ret_type, T_SHORT, bcondNotEqual, notShort);
  z_lhr(result, result);

  // Nothing to do for T_INT.
  bind(done);
}
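
// The narrowing above, as a C-like sketch:
//   switch (ret_type) {
//     case T_BOOLEAN: result &= 0x1;             break;
//     case T_BYTE:    result = (int8_t)result;   break; // sign-extend
//     case T_CHAR:    result &= 0xffff;          break; // zero-extend
//     case T_SHORT:   result = (int16_t)result;  break; // sign-extend
//     default:        /* T_INT */                break;
//   }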

// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//   If throw_monitor_exception
//     throws IllegalMonitorStateException
//   Else if install_monitor_exception
//     installs IllegalMonitorStateException
//   Else
//     no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  Register return_pc,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception,
                                                  bool notify_jvmti) {
  BLOCK_COMMENT("remove_activation {");
  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
  notify_method_exit(false, state, notify_jvmti ? NotifyJVMTI : SkipNotifyJVMTI);

  if (StackReservedPages > 0) {
    BLOCK_COMMENT("reserved_stack_check:");
    // Test if reserved zone needs to be enabled.
    Label no_reserved_zone_enabling;

    // Compare frame pointers. There is no good stack pointer, as with stack
    // frame compression we can get different SPs when we do calls. A subsequent
    // call could have a smaller SP, so that this compare succeeds for an
    // inner call of the method annotated with ReservedStack.
    z_lg(Z_R0, Address(Z_SP, (intptr_t)_z_abi(callers_sp)));
    z_clg(Z_R0, Address(Z_thread, JavaThread::reserved_stack_activation_offset())); // Compare with frame pointer in memory.
    z_brl(no_reserved_zone_enabling);

    // Enable reserved zone again, throw stack overflow exception.
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError));

    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }

  verify_oop(Z_tos, state);
  verify_thread();

  pop_interpreter_frame(return_pc, Z_ARG2, Z_ARG3);
  BLOCK_COMMENT("} remove_activation");
}

// lock object
//
// Registers alive
//   monitor - Address of the BasicObjectLock to be used for locking,
//             which must be initialized with the object to lock.
//   object  - Address of the object to be locked.
void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
}

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Registers alive
//   monitor - address of the BasicObjectLock to be used for locking,
//             which must be initialized with the object to lock.
//
// Throw IllegalMonitorException if object is not locked by current thread.
void InterpreterMacroAssembler::unlock_object(Register monitor, Register object) {
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
}

void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  load_and_test_long(mdp, Address(Z_fp, _z_ijava_state_neg(mdx)));
  z_brz(zero_continue);
}

// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label    set_mdp;
  Register mdp    = Z_ARG4;
  Register method = Z_ARG5;

  get_method(method);
  // Test MDO to avoid the call if it is NULL.
  load_and_test_long(mdp, method2_(method, method_data));
  z_brz(set_mdp);

  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), method, Z_bcp);
  // Z_RET: mdi
  // Mdo is guaranteed to be non-zero here, we checked for it before the call.
  assert(method->is_nonvolatile(), "choose nonvolatile reg or reload from frame");
  z_lg(mdp, method2_(method, method_data)); // Must reload, mdp is volatile reg.
  add2reg_with_index(mdp, in_bytes(MethodData::data_offset()), Z_RET, mdp);

  bind(set_mdp);
  save_mdp(mdp);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  NearLabel verify_continue;
  Register bcp_expected = Z_ARG3;
  Register mdp    = Z_ARG4;
  Register method = Z_ARG5;

  test_method_data_pointer(mdp, verify_continue); // If mdp is zero, continue.
  get_method(method);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
  load_sized_value(bcp_expected, Address(mdp, DataLayout::bci_offset()), 2, false /*signed*/);
  z_ag(bcp_expected, Address(method, Method::const_offset()));
  load_address(bcp_expected, Address(bcp_expected, ConstMethod::codes_offset()));
  compareU64_and_branch(bcp_expected, Z_bcp, bcondEqual, verify_continue);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), method, Z_bcp, mdp);
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  z_stg(value, constant, mdp_in);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      Register tmp,
                                                      bool decrement) {
  assert_different_registers(mdp_in, tmp);
  // Counter address.
  Address data(mdp_in, constant);
  const int delta = decrement ? -DataLayout::counter_increment : DataLayout::counter_increment;
  add2mem_64(data, delta, tmp);
}
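
// In effect (a sketch; tmp only serves as a scratch register for the update):
//   *(int64_t*)(mdp_in + constant) += decrement ? -DataLayout::counter_increment
//                                               : +DataLayout::counter_increment;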
1050 
1051 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
1052                                                 int flag_byte_constant) {
1053   assert(ProfileInterpreter, "must be profiling interpreter");
1054   // Set the flag.
1055   z_oi(Address(mdp_in, DataLayout::flags_offset()), flag_byte_constant);
1056 }
1057 
1058 void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
1059                                                  int offset,
1060                                                  Register value,
1061                                                  Register test_value_out,
1062                                                  Label& not_equal_continue) {
1063   assert(ProfileInterpreter, "must be profiling interpreter");
1064   if (test_value_out == noreg) {
1065     z_cg(value, Address(mdp_in, offset));
1066     z_brne(not_equal_continue);
1067   } else {
1068     // Put the test value into a register, so caller can use it:
1069     z_lg(test_value_out, Address(mdp_in, offset));
1070     compareU64_and_branch(test_value_out, value, bcondNotEqual, not_equal_continue);
1071   }
1072 }
1073 
1074 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp) {
1075   update_mdp_by_offset(mdp_in, noreg, offset_of_disp);
1076 }
1077 
1078 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
1079                                                      Register dataidx,
1080                                                      int offset_of_disp) {
1081   assert(ProfileInterpreter, "must be profiling interpreter");
1082   Address disp_address(mdp_in, dataidx, offset_of_disp);
1083   Assembler::z_ag(mdp_in, disp_address);
1084   save_mdp(mdp_in);
1085 }
1086 
1087 void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) {
1088   assert(ProfileInterpreter, "must be profiling interpreter");
1089   add2reg(mdp_in, constant);
1090   save_mdp(mdp_in);
1091 }
1092 
1093 void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
1094   assert(ProfileInterpreter, "must be profiling interpreter");
1095   assert(return_bci->is_nonvolatile(), "choose nonvolatile reg or save/restore");
1096   call_VM(noreg,
1097           CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
1098           return_bci);
1099 }
1100 
1101 void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) {
1102   if (ProfileInterpreter) {
1103     Label profile_continue;
1104 
1105     // If no method data exists, go to profile_continue.
1106     // Otherwise, assign to mdp.
1107     test_method_data_pointer(mdp, profile_continue);
1108 
1109     // We are taking a branch. Increment the taken count.
1110     // We inline increment_mdp_data_at to return bumped_count in a register
1111     //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1112     Address data(mdp, JumpData::taken_offset());
1113     z_lg(bumped_count, data);
1114     // 64-bit overflow is very unlikely. Saturation to 32-bit values is
1115     // performed when reading the counts.
1116     add2reg(bumped_count, DataLayout::counter_increment);
1117     z_stg(bumped_count, data); // Store back out
1118 
1119     // The method data pointer needs to be updated to reflect the new target.
1120     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1121     bind(profile_continue);
1122   }
1123 }
1124 
1125 // Kills Z_R1_scratch.
1126 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1127   if (ProfileInterpreter) {
1128     Label profile_continue;
1129 
1130     // If no method data exists, go to profile_continue.
1131     test_method_data_pointer(mdp, profile_continue);
1132 
1133     // We are taking a branch. Increment the not taken count.
1134     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()), Z_R1_scratch);
1135 
1136     // The method data pointer needs to be updated to correspond to
1137     // the next bytecode.
1138     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1139     bind(profile_continue);
1140   }
1141 }
1142 
1143 // Kills: Z_R1_scratch.
1144 void InterpreterMacroAssembler::profile_call(Register mdp) {
1145   if (ProfileInterpreter) {
1146     Label profile_continue;
1147 
1148     // If no method data exists, go to profile_continue.
1149     test_method_data_pointer(mdp, profile_continue);
1150 
1151     // We are making a call. Increment the count.
1152     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1153 
1154     // The method data pointer needs to be updated to reflect the new target.
1155     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1156     bind(profile_continue);
1157   }
1158 }
1159 
1160 void InterpreterMacroAssembler::profile_final_call(Register mdp) {
1161   if (ProfileInterpreter) {
1162     Label profile_continue;
1163 
1164     // If no method data exists, go to profile_continue.
1165     test_method_data_pointer(mdp, profile_continue);
1166 
1167     // We are making a call. Increment the count.
1168     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1169 
1170     // The method data pointer needs to be updated to reflect the new target.
1171     update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1172     bind(profile_continue);
1173   }
1174 }
1175 
1176 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1177                                                      Register mdp,
1178                                                      Register reg2,
1179                                                      bool receiver_can_be_null) {
1180   if (ProfileInterpreter) {
1181     NearLabel profile_continue;
1182 
1183     // If no method data exists, go to profile_continue.
1184     test_method_data_pointer(mdp, profile_continue);
1185 
1186     NearLabel skip_receiver_profile;
1187     if (receiver_can_be_null) {
1188       NearLabel not_null;
1189       compareU64_and_branch(receiver, (intptr_t)0L, bcondNotEqual, not_null);
1190       // We are making a call. Increment the count for null receiver.
1191       increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1192       z_bru(skip_receiver_profile);
1193       bind(not_null);
1194     }
1195 
1196     // Record the receiver type.
1197     record_klass_in_profile(receiver, mdp, reg2, true);
1198     bind(skip_receiver_profile);
1199 
1200     // The method data pointer needs to be updated to reflect the new target.
1201     update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1202     bind(profile_continue);
1203   }
1204 }
1205 
1206 // This routine creates a state machine for updating the multi-row
1207 // type profile at a virtual call site (or other type-sensitive bytecode).
1208 // The machine visits each row (of receiver/count) until the receiver type
1209 // is found, or until it runs out of rows. At the same time, it remembers
1210 // the location of the first empty row. (An empty row records null for its
1211 // receiver, and can be allocated for a newly-observed receiver type.)
1212 // Because there are two degrees of freedom in the state, a simple linear
1213 // search will not work; it must be a decision tree. Hence this helper
1214 // function is recursive, to generate the required tree structured code.
1215 // It's the interpreter, so we are trading off code space for speed.
1216 // See below for example code.
1217 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1218                                         Register receiver, Register mdp,
1219                                         Register reg2, int start_row,
1220                                         Label& done, bool is_virtual_call) {
1221   if (TypeProfileWidth == 0) {
1222     if (is_virtual_call) {
1223       increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1224     }
1225     return;
1226   }
1227 
1228   int last_row = VirtualCallData::row_limit() - 1;
1229   assert(start_row <= last_row, "must be work left to do");
1230   // Test this row for both the receiver and for null.
1231   // Take any of three different outcomes:
1232   //   1. found receiver => increment count and goto done
1233   //   2. found null => keep looking for case 1, maybe allocate this cell
1234   //   3. found something else => keep looking for cases 1 and 2
1235   // Case 3 is handled by a recursive call.
1236   for (int row = start_row; row <= last_row; row++) {
1237     NearLabel next_test;
1238     bool test_for_null_also = (row == start_row);
1239 
1240     // See if the receiver is receiver[n].
1241     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1242     test_mdp_data_at(mdp, recvr_offset, receiver,
1243                      (test_for_null_also ? reg2 : noreg),
1244                      next_test);
1245     // (Reg2 now contains the receiver from the CallData.)
1246 
1247     // The receiver is receiver[n]. Increment count[n].
1248     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1249     increment_mdp_data_at(mdp, count_offset);
1250     z_bru(done);
1251     bind(next_test);
1252 
1253     if (test_for_null_also) {
1254       Label found_null;
1255       // Failed the equality check on receiver[n]... Test for null.
1256       z_ltgr(reg2, reg2);
1257       if (start_row == last_row) {
1258         // The only thing left to do is handle the null case.
1259         if (is_virtual_call) {
1260           z_brz(found_null);
1261           // Receiver did not match any saved receiver and there is no empty row for it.
1262           // Increment total counter to indicate polymorphic case.
1263           increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1264           z_bru(done);
1265           bind(found_null);
1266         } else {
1267           z_brnz(done);
1268         }
1269         break;
1270       }
      // Since null is rare, make it the branch-taken case.
1272       z_brz(found_null);
1273 
1274       // Put all the "Case 3" tests here.
1275       record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);
1276 
1277       // Found a null. Keep searching for a matching receiver,
1278       // but remember that this is an empty (unused) slot.
1279       bind(found_null);
1280     }
1281   }
1282 
  // In the fall-through case, we found no matching receiver, but we
  // observed that receiver[start_row] is NULL.
1285 
1286   // Fill in the receiver field and increment the count.
1287   int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1288   set_mdp_data_at(mdp, recvr_offset, receiver);
1289   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1290   load_const_optimized(reg2, DataLayout::counter_increment);
1291   set_mdp_data_at(mdp, count_offset, reg2);
1292   if (start_row > 0) {
1293     z_bru(done);
1294   }
1295 }
1296 
1297 // Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
1299 //   if (row[0].rec == rec) { row[0].incr(); goto done; }
1300 //   if (row[0].rec != NULL) {
1301 //     // inner copy of decision tree, rooted at row[1]
1302 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1303 //     if (row[1].rec != NULL) {
1304 //       // degenerate decision tree, rooted at row[2]
1305 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1306 //       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
1307 //       row[2].init(rec); goto done;
1308 //     } else {
1309 //       // remember row[1] is empty
1310 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1311 //       row[1].init(rec); goto done;
1312 //     }
1313 //   } else {
1314 //     // remember row[0] is empty
1315 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1316 //     if (row[2].rec == rec) { row[2].incr(); goto done; }
1317 //     row[0].init(rec); goto done;
1318 //   }
1319 //   done:
1320 
1321 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1322                                                         Register mdp, Register reg2,
1323                                                         bool is_virtual_call) {
1324   assert(ProfileInterpreter, "must be profiling");
1325   Label done;
1326 
1327   record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
1328 
1329   bind (done);
1330 }
1331 
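// Conceptually, profile_ret below does (a C-like sketch of the RetData
// update; not authoritative):
//   if (mdp == NULL) goto continue;
//   RetData.count++;                                  // total ret count
//   for (row = 0; row < RetData::row_limit(); row++) {
//     if (RetData.bci[row] == return_bci) {
//       RetData.bci_count[row]++;
//       mdp += RetData.bci_displacement[row];
//       goto continue;
//     }
//   }
//   update_mdp_for_ret(return_bci);                   // no matching row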
1332 void InterpreterMacroAssembler::profile_ret(Register return_bci, Register mdp) {
1333   if (ProfileInterpreter) {
1334     NearLabel profile_continue;
1336 
1337     // If no method data exists, go to profile_continue.
1338     test_method_data_pointer(mdp, profile_continue);
1339 
1340     // Update the total ret count.
1341     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1342 
    for (uint row = 0; row < RetData::row_limit(); row++) {
1344       NearLabel next_test;
1345 
1346       // See if return_bci is equal to bci[n]:
1347       test_mdp_data_at(mdp,
1348                        in_bytes(RetData::bci_offset(row)),
1349                        return_bci, noreg,
1350                        next_test);
1351 
1352       // Return_bci is equal to bci[n]. Increment the count.
1353       increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));
1354 
1355       // The method data pointer needs to be updated to reflect the new target.
1356       update_mdp_by_offset(mdp, in_bytes(RetData::bci_displacement_offset(row)));
1357       z_bru(profile_continue);
1358       bind(next_test);
1359     }
1360 
1361     update_mdp_for_ret(return_bci);
1362 
1363     bind(profile_continue);
1364   }
1365 }
1366 
1367 void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
1368   if (ProfileInterpreter) {
1369     Label profile_continue;
1370 
1371     // If no method data exists, go to profile_continue.
1372     test_method_data_pointer(mdp, profile_continue);
1373 
1374     set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
1375 
1376     // The method data pointer needs to be updated.
1377     int mdp_delta = in_bytes(BitData::bit_data_size());
1378     if (TypeProfileCasts) {
1379       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1380     }
1381     update_mdp_by_constant(mdp, mdp_delta);
1382 
1383     bind(profile_continue);
1384   }
1385 }
1386 
1387 void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp, Register tmp) {
1388   if (ProfileInterpreter && TypeProfileCasts) {
1389     Label profile_continue;
1390 
1391     // If no method data exists, go to profile_continue.
1392     test_method_data_pointer(mdp, profile_continue);
1393 
1394     int count_offset = in_bytes(CounterData::count_offset());
1395     // Back up the address, since we have already bumped the mdp.
1396     count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
1397 
1398     // *Decrement* the counter. We expect to see zero or small negatives.
1399     increment_mdp_data_at(mdp, count_offset, tmp, true);
1400 
1401     bind (profile_continue);
1402   }
1403 }
1404 
1405 void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
1406   if (ProfileInterpreter) {
1407     Label profile_continue;
1408 
1409     // If no method data exists, go to profile_continue.
1410     test_method_data_pointer(mdp, profile_continue);
1411 
1412     // The method data pointer needs to be updated.
1413     int mdp_delta = in_bytes(BitData::bit_data_size());
1414     if (TypeProfileCasts) {
1415       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1416 
1417       // Record the object type.
1418       record_klass_in_profile(klass, mdp, reg2, false);
1419     }
1420     update_mdp_by_constant(mdp, mdp_delta);
1421 
1422     bind(profile_continue);
1423   }
1424 }
1425 
1426 void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
1427   if (ProfileInterpreter) {
1428     Label profile_continue;
1429 
1430     // If no method data exists, go to profile_continue.
1431     test_method_data_pointer(mdp, profile_continue);
1432 
1433     // Update the default case count.
1434     increment_mdp_data_at(mdp, in_bytes(MultiBranchData::default_count_offset()));
1435 
1436     // The method data pointer needs to be updated.
1437     update_mdp_by_offset(mdp, in_bytes(MultiBranchData::default_displacement_offset()));
1438 
1439     bind(profile_continue);
1440   }
1441 }
1442 
1443 // Kills: index, scratch1, scratch2.
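// Conceptually (a sketch):
//   case_data = mdp + case_array_offset + index * per_case_size;
//   case_data->relative_count++;
//   mdp += case_data->relative_displacement;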
1444 void InterpreterMacroAssembler::profile_switch_case(Register index,
1445                                                     Register mdp,
1446                                                     Register scratch1,
1447                                                     Register scratch2) {
1448   if (ProfileInterpreter) {
1449     Label profile_continue;
1450     assert_different_registers(index, mdp, scratch1, scratch2);
1451 
1452     // If no method data exists, go to profile_continue.
1453     test_method_data_pointer(mdp, profile_continue);
1454 
1455     // Build the base (index * per_case_size_in_bytes()) +
1456     // case_array_offset_in_bytes().
1457     z_sllg(index, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));
1458     add2reg(index, in_bytes(MultiBranchData::case_array_offset()));
1459 
    // Add the calculated base to the mdp -> address of the case's data.
1461     Address case_data_addr(mdp, index);
1462     Register case_data = scratch1;
1463     load_address(case_data, case_data_addr);
1464 
1465     // Update the case count.
1466     increment_mdp_data_at(case_data,
1467                           in_bytes(MultiBranchData::relative_count_offset()),
1468                           scratch2);
1469 
1470     // The method data pointer needs to be updated.
1471     update_mdp_by_offset(mdp,
1472                          index,
1473                          in_bytes(MultiBranchData::relative_displacement_offset()));
1474 
1475     bind(profile_continue);
1476   }
1477 }
1478 
// Kills: Z_R0, Z_R1, flags. Loads the klass from obj (if obj is not null).
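//
// Conceptually (a sketch of the TypeEntries update; 'entry' is the 64-bit
// cell at mdo_addr, whose low bits hold the null_seen/type_unknown flags):
//   if (obj == NULL)                                   entry |= null_seen;
//   else if ((entry & type_klass_mask) == klass(obj))  ; // seen before, done
//   else if (entry & type_unknown)                     ; // already polymorphic, done
//   else if ((entry & type_mask) == 0)                 entry |= klass(obj);   // first type
//   else                                               entry |= type_unknown; // conflict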
1480 void InterpreterMacroAssembler::profile_obj_type(Register obj, Address mdo_addr, Register klass, bool cmp_done) {
1481   NearLabel null_seen, init_klass, do_nothing, do_update;
1482 
1483   // Klass = obj is allowed.
1484   const Register tmp = Z_R1;
1485   assert_different_registers(obj, mdo_addr.base(), tmp, Z_R0);
1486   assert_different_registers(klass, mdo_addr.base(), tmp, Z_R0);
1487 
1488   z_lg(tmp, mdo_addr);
1489   if (cmp_done) {
1490     z_brz(null_seen);
1491   } else {
1492     compareU64_and_branch(obj, (intptr_t)0, Assembler::bcondEqual, null_seen);
1493   }
1494 
1495   MacroAssembler::verify_oop(obj, FILE_AND_LINE);
1496   load_klass(klass, obj);
1497 
1498   // Klass seen before, nothing to do (regardless of unknown bit).
1499   z_lgr(Z_R0, tmp);
1500   assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction");
1501   z_nill(Z_R0, TypeEntries::type_klass_mask & 0xFFFF);
1502   compareU64_and_branch(Z_R0, klass, Assembler::bcondEqual, do_nothing);
1503 
1504   // Already unknown. Nothing to do anymore.
1505   z_tmll(tmp, TypeEntries::type_unknown);
1506   z_brc(Assembler::bcondAllOne, do_nothing);
1507 
1508   z_lgr(Z_R0, tmp);
1509   assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction");
1510   z_nill(Z_R0, TypeEntries::type_mask & 0xFFFF);
1511   compareU64_and_branch(Z_R0, (intptr_t)0, Assembler::bcondEqual, init_klass);
1512 
1513   // Different than before. Cannot keep accurate profile.
1514   z_oill(tmp, TypeEntries::type_unknown);
1515   z_bru(do_update);
1516 
1517   bind(init_klass);
1518   // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
1519   z_ogr(tmp, klass);
1520   z_bru(do_update);
1521 
1522   bind(null_seen);
1523   // Set null_seen if obj is 0.
1524   z_oill(tmp, TypeEntries::null_seen);
1525   // fallthru: z_bru(do_update);
1526 
1527   bind(do_update);
1528   z_stg(tmp, mdo_addr);
1529 
1530   bind(do_nothing);
1531 }
1532 
1533 void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
1534   if (!ProfileInterpreter) {
1535     return;
1536   }
1537 
1538   assert_different_registers(mdp, callee, tmp);
1539 
1540   if (MethodData::profile_arguments() || MethodData::profile_return()) {
1541     Label profile_continue;
1542 
1543     test_method_data_pointer(mdp, profile_continue);
1544 
1545     int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
1546 
1547     z_cliy(in_bytes(DataLayout::tag_offset()) - off_to_start, mdp,
1548            is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
1549     z_brne(profile_continue);
1550 
1551     if (MethodData::profile_arguments()) {
1552       NearLabel done;
1553       int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
1554       add2reg(mdp, off_to_args);
1555 
1556       for (int i = 0; i < TypeProfileArgsLimit; i++) {
1557         if (i > 0 || MethodData::profile_return()) {
1558           // If return value type is profiled we may have no argument to profile.
1559           z_lg(tmp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, mdp);
1560           add2reg(tmp, -i*TypeStackSlotEntries::per_arg_count());
1561           compare64_and_branch(tmp, TypeStackSlotEntries::per_arg_count(), Assembler::bcondLow, done);
1562         }
1563         z_lg(tmp, Address(callee, Method::const_offset()));
1564         z_lgh(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // Stack offset o (zero based) from the start of the argument
        // list. For n arguments translates into offset n - o - 1 from
        // the end of the argument list. But there is an extra slot at
        // the top of the stack. So the offset is n - o from Z_esp.
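        // Example (a sketch): n = 4 parameters and slot o = 1 give
        // n - o - 1 = 2 slots from the list end; the extra top-of-stack
        // slot makes it n - o = 3 stack elements above Z_esp.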
1569         z_sg(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
1570         z_sllg(tmp, tmp, Interpreter::logStackElementSize);
1571         Address stack_slot_addr(tmp, Z_esp);
1572         z_ltg(tmp, stack_slot_addr);
1573 
1574         Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
1575         profile_obj_type(tmp, mdo_arg_addr, tmp, /*ltg did compare to 0*/ true);
1576 
1577         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1578         add2reg(mdp, to_add);
1579         off_to_args += to_add;
1580       }
1581 
1582       if (MethodData::profile_return()) {
1583         z_lg(tmp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, mdp);
1584         add2reg(tmp, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1585       }
1586 
1587       bind(done);
1588 
1589       if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. Tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
        // if there's a return to profile.
1594         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1595         z_sllg(tmp, tmp, exact_log2(DataLayout::cell_size));
1596         z_agr(mdp, tmp);
1597       }
1598       z_stg(mdp, _z_ijava_state_neg(mdx), Z_fp);
1599     } else {
1600       assert(MethodData::profile_return(), "either profile call args or call ret");
1601       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1602     }
1603 
1604     // Mdp points right after the end of the
1605     // CallTypeData/VirtualCallTypeData, right after the cells for the
1606     // return value type if there's one.
1607     bind(profile_continue);
1608   }
1609 }
1610 
1611 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1612   assert_different_registers(mdp, ret, tmp);
1613   if (ProfileInterpreter && MethodData::profile_return()) {
1614     Label profile_continue;
1615 
1616     test_method_data_pointer(mdp, profile_continue);
1617 
1618     if (MethodData::profile_return_jsr292_only()) {
1619       // If we don't profile all invoke bytecodes we must make sure
1620       // it's a bytecode we indeed profile. We can't go back to the
1621       // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
1623       // length.
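      // I.e. (a sketch) profile only if:
      //   bc == Bytecodes::_invokedynamic || bc == Bytecodes::_invokehandle ||
      //   method->intrinsic_id() == vmIntrinsics::_compiledLambdaForm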
1624       NearLabel do_profile;
1625       Address bc(Z_bcp);
1626       z_lb(tmp, bc);
1627       compare32_and_branch(tmp, Bytecodes::_invokedynamic, Assembler::bcondEqual, do_profile);
1628       compare32_and_branch(tmp, Bytecodes::_invokehandle, Assembler::bcondEqual, do_profile);
1629       get_method(tmp);
1630       // Supplement to 8139891: _intrinsic_id exceeded 1-byte size limit.
1631       if (Method::intrinsic_id_size_in_bytes() == 1) {
1632         z_cli(Method::intrinsic_id_offset_in_bytes(), tmp, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1633       } else {
1634         assert(Method::intrinsic_id_size_in_bytes() == 2, "size error: check Method::_intrinsic_id");
1635         z_lh(tmp, Method::intrinsic_id_offset_in_bytes(), Z_R0, tmp);
1636         z_chi(tmp, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1637       }
1638       z_brne(profile_continue);
1639 
1640       bind(do_profile);
1641     }
1642 
1643     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
1644     profile_obj_type(ret, mdo_ret_addr, tmp);
1645 
1646     bind(profile_continue);
1647   }
1648 }
1649 
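// Conceptually, profile_parameters_type below does (a C-like sketch,
// not authoritative):
//   di = mdo->parameters_type_data_di;        // < 0: no parameters profiled
//   if (di < 0) goto continue;
//   entry = array_len - per_arg_count;        // last entry, in cells
//   while (entry >= 0) {                      // collect from last parameter down
//     obj = local at stack_slot[entry];
//     profile_obj_type(obj, &type[entry]);
//     entry -= per_arg_count;
//   }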
1650 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1651   if (ProfileInterpreter && MethodData::profile_parameters()) {
1652     Label profile_continue, done;
1653 
1654     test_method_data_pointer(mdp, profile_continue);
1655 
1656     // Load the offset of the area within the MDO used for
1657     // parameters. If it's negative we're not profiling any parameters.
1658     Address parm_di_addr(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()));
1659     load_and_test_int2long(tmp1, parm_di_addr);
1660     z_brl(profile_continue);
1661 
1662     // Compute a pointer to the area for parameters from the offset
1663     // and move the pointer to the slot for the last
1664     // parameters. Collect profiling from last parameter down.
1665     // mdo start + parameters offset + array length - 1
1666 
1667     // Pointer to the parameter area in the MDO.
1668     z_agr(mdp, tmp1);
1669 
1670     // Offset of the current profile entry to update.
1671     const Register entry_offset = tmp1;
1672     // entry_offset = array len in number of cells.
1673     z_lg(entry_offset, Address(mdp, ArrayData::array_len_offset()));
1674     // entry_offset (number of cells) = array len - size of 1 entry
1675     add2reg(entry_offset, -TypeStackSlotEntries::per_arg_count());
1676     // entry_offset in bytes
1677     z_sllg(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
1678 
1679     Label loop;
1680     bind(loop);
1681 
1682     Address arg_off(mdp, entry_offset, ParametersTypeData::stack_slot_offset(0));
1683     Address arg_type(mdp, entry_offset, ParametersTypeData::type_offset(0));
1684 
1685     // Load offset on the stack from the slot for this parameter.
1686     z_lg(tmp2, arg_off);
1687     z_sllg(tmp2, tmp2, Interpreter::logStackElementSize);
1688     z_lcgr(tmp2); // Negate.
1689 
1690     // Profile the parameter.
1691     z_ltg(tmp2, Address(Z_locals, tmp2));
1692     profile_obj_type(tmp2, arg_type, tmp2, /*ltg did compare to 0*/ true);
1693 
1694     // Go to next parameter.
1695     z_aghi(entry_offset, -TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size);
1696     z_brnl(loop);
1697 
1698     bind(profile_continue);
1699   }
1700 }
1701 
1702 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
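//
// Equivalent C-like sketch (not authoritative; 'counter' is the 32-bit
// value at counter_addr):
//   if (!preloaded) scratch = counter;        // else the caller preloaded it
//   scratch += increment;
//   counter  = scratch;                       // store back
//   if ((scratch & *mask) <cond> 0) goto *where;  // branch emitted only if where != NULL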
1703 void InterpreterMacroAssembler::increment_mask_and_jump(Address          counter_addr,
1704                                                         int              increment,
1705                                                         Address          mask,
1706                                                         Register         scratch,
1707                                                         bool             preloaded,
1708                                                         branch_condition cond,
1709                                                         Label           *where) {
1710   assert_different_registers(counter_addr.base(), scratch);
1711   if (preloaded) {
1712     add2reg(scratch, increment);
1713     reg2mem_opt(scratch, counter_addr, false);
1714   } else {
1715     if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(increment) && counter_addr.is_RSYform()) {
1716       z_alsi(counter_addr.disp20(), counter_addr.base(), increment);
1717       mem2reg_signed_opt(scratch, counter_addr);
1718     } else {
1719       mem2reg_signed_opt(scratch, counter_addr);
1720       add2reg(scratch, increment);
1721       reg2mem_opt(scratch, counter_addr, false);
1722     }
1723   }
1724   z_n(scratch, mask);
1725   if (where) { z_brc(cond, *where); }
1726 }
1727 
1728 // Get MethodCounters object for given method. Lazily allocated if necessary.
1729 //   method    - Ptr to Method object.
1730 //   Rcounters - Ptr to MethodCounters object associated with Method object.
1731 //   skip      - Exit point if MethodCounters object can't be created (OOM condition).
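//
// Conceptually (a sketch):
//   Rcounters = Rmethod->method_counters();
//   if (Rcounters == NULL) {
//     Rcounters = InterpreterRuntime::build_method_counters(Rmethod);
//     if (Rcounters == NULL) goto skip;       // allocation failed (OOM)
//   }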
1732 void InterpreterMacroAssembler::get_method_counters(Register Rmethod,
1733                                                     Register Rcounters,
1734                                                     Label& skip) {
1735   assert_different_registers(Rmethod, Rcounters);
1736 
1737   BLOCK_COMMENT("get MethodCounters object {");
1738 
1739   Label has_counters;
1740   load_and_test_long(Rcounters, Address(Rmethod, Method::method_counters_offset()));
1741   z_brnz(has_counters);
1742 
1743   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), Rmethod);
1744   z_ltgr(Rcounters, Z_RET); // Runtime call returns MethodCounters object.
1745   z_brz(skip); // No MethodCounters, out of memory.
1746 
1747   bind(has_counters);
1748 
1749   BLOCK_COMMENT("} get MethodCounters object");
1750 }
1751 
1752 // Increment invocation counter in MethodCounters object.
1753 // Return (invocation_counter+backedge_counter) as "result" in RctrSum.
1754 // Counter values are all unsigned.
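//
// In essence (a sketch; the state bits are masked off one of the two
// addends, which the threshold checks tolerate):
//   invocation_counter += count_increment;    // written back to MethodCounters
//   RctrSum = backedge_counter + invocation_counter;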
1755 void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register RctrSum) {
1756   assert(UseCompiler, "incrementing must be useful");
1757   assert_different_registers(Rcounters, RctrSum);
1758 
1759   int increment          = InvocationCounter::count_increment;
1760   int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset());
1761   int be_counter_offset  = in_bytes(MethodCounters::backedge_counter_offset()   + InvocationCounter::counter_offset());
1762 
1763   BLOCK_COMMENT("Increment invocation counter {");
1764 
1765   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(increment)) {
1766     // Increment the invocation counter in place,
1767     // then add the incremented value to the backedge counter.
1768     z_l(RctrSum, be_counter_offset, Rcounters);
1769     z_alsi(inv_counter_offset, Rcounters, increment);     // Atomic increment @no extra cost!
1770     z_nilf(RctrSum, InvocationCounter::count_mask_value); // Mask off state bits.
1771     z_al(RctrSum, inv_counter_offset, Z_R0, Rcounters);
1772   } else {
1773     // This path is optimized for low register consumption
1774     // at the cost of somewhat higher operand delays.
1775     // It does not need an extra temp register.
1776 
1777     // Update the invocation counter.
1778     z_l(RctrSum, inv_counter_offset, Rcounters);
1779     if (RctrSum == Z_R0) {
1780       z_ahi(RctrSum, increment);
1781     } else {
1782       add2reg(RctrSum, increment);
1783     }
1784     z_st(RctrSum, inv_counter_offset, Rcounters);
1785 
1786     // Mask off the state bits.
1787     z_nilf(RctrSum, InvocationCounter::count_mask_value);
1788 
1789     // Add the backedge counter to the updated invocation counter to
1790     // form the result.
1791     z_al(RctrSum, be_counter_offset, Z_R0, Rcounters);
1792   }
1793 
1794   BLOCK_COMMENT("} Increment invocation counter");
1795 
  // Note that this macro must leave (backedge_count + invocation_count) in RctrSum!
1797 }
1798 
1799 
// Increment the backedge counter in the MethodCounters object.
// Return (invocation_counter+backedge_counter) as "result" in RctrSum.
// Counter values are all unsigned.
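//
// Symmetric to increment_invocation_counter above (a sketch):
//   backedge_counter += count_increment;      // written back to MethodCounters
//   RctrSum = invocation_counter + backedge_counter;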
1803 void InterpreterMacroAssembler::increment_backedge_counter(Register Rcounters, Register RctrSum) {
1804   assert(UseCompiler, "incrementing must be useful");
1805   assert_different_registers(Rcounters, RctrSum);
1806 
1807   int increment          = InvocationCounter::count_increment;
1808   int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset());
1809   int be_counter_offset  = in_bytes(MethodCounters::backedge_counter_offset()   + InvocationCounter::counter_offset());
1810 
1811   BLOCK_COMMENT("Increment backedge counter {");
1812 
1813   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(increment)) {
    // Increment the backedge counter in place,
    // then add the incremented value to the invocation counter.
1816     z_l(RctrSum, inv_counter_offset, Rcounters);
1817     z_alsi(be_counter_offset, Rcounters, increment);      // Atomic increment @no extra cost!
1818     z_nilf(RctrSum, InvocationCounter::count_mask_value); // Mask off state bits.
1819     z_al(RctrSum, be_counter_offset, Z_R0, Rcounters);
1820   } else {
1821     // This path is optimized for low register consumption
1822     // at the cost of somewhat higher operand delays.
1823     // It does not need an extra temp register.
1824 
    // Update the backedge counter.
1826     z_l(RctrSum, be_counter_offset, Rcounters);
1827     if (RctrSum == Z_R0) {
1828       z_ahi(RctrSum, increment);
1829     } else {
1830       add2reg(RctrSum, increment);
1831     }
1832     z_st(RctrSum, be_counter_offset, Rcounters);
1833 
1834     // Mask off the state bits.
1835     z_nilf(RctrSum, InvocationCounter::count_mask_value);
1836 
    // Add the invocation counter to the updated backedge counter to
    // form the result.
1839     z_al(RctrSum, inv_counter_offset, Z_R0, Rcounters);
1840   }
1841 
1842   BLOCK_COMMENT("} Increment backedge counter");
1843 
  // Note that this macro must leave (backedge_count + invocation_count) in RctrSum!
1845 }
1846 
1847 // Add an InterpMonitorElem to stack (see frame_s390.hpp).
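//
// Conceptually (a sketch; delta = -monitor_size in bytes):
//   grow the frame downward by |delta|;
//   if (!stack_is_empty)
//     copy each expression stack slot down by |delta| bytes;
//   Z_esp    += delta;                        // expression stack moved down
//   monitors += delta;                        // room for the new monitor element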
1848 void InterpreterMacroAssembler::add_monitor_to_stack(bool     stack_is_empty,
1849                                                      Register Rtemp1,
1850                                                      Register Rtemp2,
1851                                                      Register Rtemp3) {
1852 
1853   const Register Rcurr_slot = Rtemp1;
1854   const Register Rlimit     = Rtemp2;
1855   const jint delta = -frame::interpreter_frame_monitor_size() * wordSize;
1856 
1857   assert((delta & LongAlignmentMask) == 0,
1858          "sizeof BasicObjectLock must be even number of doublewords");
1859   assert(2 * wordSize == -delta, "this works only as long as delta == -2*wordSize");
1860   assert(Rcurr_slot != Z_R0, "Register must be usable as base register");
1861   assert_different_registers(Rlimit, Rcurr_slot, Rtemp3);
1862 
1863   get_monitors(Rlimit);
1864 
1865   // Adjust stack pointer for additional monitor entry.
1866   resize_frame(RegisterOrConstant((intptr_t) delta), Z_fp, false);
1867 
1868   if (!stack_is_empty) {
1869     // Must copy stack contents down.
1870     NearLabel next, done;
1871 
    // Rcurr_slot := addr(TOS), Z_esp is pointing below it!
1873     add2reg(Rcurr_slot, wordSize, Z_esp);
1874 
    // Nothing to do if we are already at the monitor area.
1876     compareU64_and_branch(Rcurr_slot, Rlimit, bcondNotLow, done);
1877 
1878     bind(next);
1879 
1880     // Move one stack slot.
1881     mem2reg_opt(Rtemp3, Address(Rcurr_slot));
1882     reg2mem_opt(Rtemp3, Address(Rcurr_slot, delta));
1883     add2reg(Rcurr_slot, wordSize);
1884     compareU64_and_branch(Rcurr_slot, Rlimit, bcondLow, next); // Are we done?
1885 
1886     bind(done);
1887     // Done copying stack.
1888   }
1889 
1890   // Adjust expression stack and monitor pointers.
1891   add2reg(Z_esp, delta);
1892   add2reg(Rlimit, delta);
1893   save_monitors(Rlimit);
1894 }
1895 
// Note: Index holds the offset in bytes afterwards.
// You can use this to store a new value (with Z_locals as the base).
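//
// In essence (a sketch):
//   index <<= LogBytesPerWord;                // slot units -> byte offset
//   dst    = 32-bit value at (Z_locals + index);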
1898 void InterpreterMacroAssembler::access_local_int(Register index, Register dst) {
1899   z_sllg(index, index, LogBytesPerWord);
1900   mem2reg_opt(dst, Address(Z_locals, index), false);
1901 }
1902 
1903 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
1904   if (state == atos) { MacroAssembler::verify_oop(reg, FILE_AND_LINE); }
1905 }
1906 
1907 // Inline assembly for:
1908 //
1909 // if (thread is in interp_only_mode) {
1910 //   InterpreterRuntime::post_method_entry();
1911 // }
1912 
1913 void InterpreterMacroAssembler::notify_method_entry() {
1914 
1915   // JVMTI
1916   // Whenever JVMTI puts a thread in interp_only_mode, method
1917   // entry/exit events are sent for that thread to track stack
1918   // depth. If it is possible to enter interp_only_mode we add
1919   // the code to check if the event should be sent.
1920   if (JvmtiExport::can_post_interpreter_events()) {
1921     Label jvmti_post_done;
1922     MacroAssembler::load_and_test_int(Z_R0, Address(Z_thread, JavaThread::interp_only_mode_offset()));
1923     z_bre(jvmti_post_done);
1924     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
1925     bind(jvmti_post_done);
1926   }
1927 }
1928 
1929 // Inline assembly for:
1930 //
1931 // if (thread is in interp_only_mode) {
1932 //   if (!native_method) save result
1933 //   InterpreterRuntime::post_method_exit();
1934 //   if (!native_method) restore result
1935 // }
1936 // if (DTraceMethodProbes) {
1937 //   SharedRuntime::dtrace_method_exit(thread, method);
1938 // }
1939 //
// Native methods have their result stored in z_ijava_state.lresult
// and z_ijava_state.fresult before coming here.
// Java methods have their result stored on the expression stack.
//
// Note the dependency on frame::interpreter_frame_result().
1945 void InterpreterMacroAssembler::notify_method_exit(bool native_method,
1946                                                    TosState state,
1947                                                    NotifyMethodExitMode mode) {
1948   // JVMTI
1949   // Whenever JVMTI puts a thread in interp_only_mode, method
1950   // entry/exit events are sent for that thread to track stack
1951   // depth. If it is possible to enter interp_only_mode we add
1952   // the code to check if the event should be sent.
1953   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
1954     Label jvmti_post_done;
1955     MacroAssembler::load_and_test_int(Z_R0, Address(Z_thread, JavaThread::interp_only_mode_offset()));
1956     z_bre(jvmti_post_done);
1957     if (!native_method) push(state); // see frame::interpreter_frame_result()
1958     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
1959     if (!native_method) pop(state);
1960     bind(jvmti_post_done);
1961   }
1962 
1963 #if 0
1964   // Dtrace currently not supported on z/Architecture.
1965   {
1966     SkipIfEqual skip(this, &DTraceMethodProbes, false);
1967     push(state);
1968     get_method(c_rarg1);
1969     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1970                  r15_thread, c_rarg1);
1971     pop(state);
1972   }
1973 #endif
1974 }
1975 
1976 void InterpreterMacroAssembler::skip_if_jvmti_mode(Label &Lskip, Register Rscratch) {
1977   if (!JvmtiExport::can_post_interpreter_events()) {
1978     return;
1979   }
1980 
1981   load_and_test_int(Rscratch, Address(Z_thread, JavaThread::interp_only_mode_offset()));
  z_brnz(Lskip);
1984 }
1985 
// Pop the topmost TOP_IJAVA_FRAME and set its sender_sp as the new Z_SP.
1987 // The return pc is loaded into the register return_pc.
1988 //
1989 // Registers updated:
1990 //     return_pc  - The return pc of the calling frame.
1991 //     tmp1, tmp2 - scratch
1992 void InterpreterMacroAssembler::pop_interpreter_frame(Register return_pc, Register tmp1, Register tmp2) {
1993   // F0  Z_SP -> caller_sp (F1's)
1994   //             ...
1995   //             sender_sp (F1's)
1996   //             ...
1997   // F1  Z_fp -> caller_sp (F2's)
1998   //             return_pc (Continuation after return from F0.)
1999   //             ...
2000   // F2          caller_sp
2001 
2002   // Remove F0's activation. Restoring Z_SP to sender_sp reverts modifications
2003   // (a) by a c2i adapter and (b) by generate_fixed_frame().
2004   // In case (a) the new top frame F1 is an unextended compiled frame.
2005   // In case (b) F1 is converted from PARENT_IJAVA_FRAME to TOP_IJAVA_FRAME.
2006 
  // Case (b) seems to be redundant when returning to an interpreted caller,
  // because then the caller's top_frame_sp is installed as sp (see
  // TemplateInterpreterGenerator::generate_return_entry_for()). But
2010   // pop_interpreter_frame() is also used in exception handling and there the
2011   // frame type of the caller is unknown, therefore top_frame_sp cannot be used,
2012   // so it is important that sender_sp is the caller's sp as TOP_IJAVA_FRAME.
2013 
2014   Register R_f1_sender_sp = tmp1;
2015   Register R_f2_sp = tmp2;
2016 
2017   // First check for the interpreter frame's magic.
2018   asm_assert_ijava_state_magic(R_f2_sp/*tmp*/);
2019   z_lg(R_f2_sp, _z_parent_ijava_frame_abi(callers_sp), Z_fp);
2020   z_lg(R_f1_sender_sp, _z_ijava_state_neg(sender_sp), Z_fp);
2021   if (return_pc->is_valid())
2022     z_lg(return_pc, _z_parent_ijava_frame_abi(return_pc), Z_fp);
2023   // Pop F0 by resizing to R_f1_sender_sp and using R_f2_sp as fp.
2024   resize_frame_absolute(R_f1_sender_sp, R_f2_sp, false/*load fp*/);
2025 
2026 #ifdef ASSERT
2027   // The return_pc in the new top frame is dead... at least that's my
2028   // current understanding; to assert this I overwrite it.
2029   load_const_optimized(Z_ARG3, 0xb00b1);
2030   z_stg(Z_ARG3, _z_parent_ijava_frame_abi(return_pc), Z_SP);
2031 #endif
2032 }
2033 
2034 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2035   if (VerifyFPU) {
    unimplemented("verifyFPU");
2037   }
2038 }