1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "compiler/disassembler.hpp"
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "gc/shared/gc_globals.hpp"
  29 #include "gc/shared/tlab_globals.hpp"
  30 #include "interpreter/interpreter.hpp"
  31 #include "interpreter/interpreterRuntime.hpp"
  32 #include "interpreter/interp_masm.hpp"
  33 #include "interpreter/templateTable.hpp"
  34 #include "memory/universe.hpp"
  35 #include "oops/methodCounters.hpp"
  36 #include "oops/methodData.hpp"
  37 #include "oops/objArrayKlass.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "oops/inlineKlass.hpp"
  40 #include "oops/resolvedFieldEntry.hpp"
  41 #include "oops/resolvedIndyEntry.hpp"
  42 #include "oops/resolvedMethodEntry.hpp"
  43 #include "prims/jvmtiExport.hpp"
  44 #include "prims/methodHandles.hpp"
  45 #include "runtime/frame.inline.hpp"
  46 #include "runtime/safepointMechanism.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "runtime/stubRoutines.hpp"
  49 #include "runtime/synchronizer.hpp"
  50 #include "utilities/macros.hpp"
  51 
  52 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
  53 
  54 // Global Register Names
  55 static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
  56 static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
  57 
  58 // Address Computation: local variables
  59 static inline Address iaddress(int n) {
  60   return Address(rlocals, Interpreter::local_offset_in_bytes(n));
  61 }
  62 
  63 static inline Address laddress(int n) {
  64   return iaddress(n + 1);
  65 }
  66 
  67 #ifndef _LP64
  68 static inline Address haddress(int n) {
  69   return iaddress(n + 0);
  70 }
  71 #endif
  72 
  73 static inline Address faddress(int n) {
  74   return iaddress(n);
  75 }
  76 
  77 static inline Address daddress(int n) {
  78   return laddress(n);
  79 }
  80 
  81 static inline Address aaddress(int n) {
  82   return iaddress(n);
  83 }
  84 
  85 static inline Address iaddress(Register r) {
  86   return Address(rlocals, r, Address::times_ptr);
  87 }
  88 
  89 static inline Address laddress(Register r) {
  90   return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
  91 }
  92 
  93 #ifndef _LP64
  94 static inline Address haddress(Register r)       {
  95   return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
  96 }
  97 #endif
  98 
  99 static inline Address faddress(Register r) {
 100   return iaddress(r);
 101 }
 102 
 103 static inline Address daddress(Register r) {
 104   return laddress(r);
 105 }
 106 
 107 static inline Address aaddress(Register r) {
 108   return iaddress(r);
 109 }
 110 
 111 
 112 // expression stack
 113 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
 114 // data beyond the rsp which is potentially unsafe in an MT environment;
 115 // an interrupt may overwrite that data.)
 116 static inline Address at_rsp   () {
 117   return Address(rsp, 0);
 118 }
 119 
 120 // At top of Java expression stack which may be different than esp().  It
 121 // isn't for category 1 objects.
 122 static inline Address at_tos   () {
 123   return Address(rsp,  Interpreter::expr_offset_in_bytes(0));
 124 }
 125 
 126 static inline Address at_tos_p1() {
 127   return Address(rsp,  Interpreter::expr_offset_in_bytes(1));
 128 }
 129 
 130 static inline Address at_tos_p2() {
 131   return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
 132 }
 133 
 134 // Condition conversion
 135 static Assembler::Condition j_not(TemplateTable::Condition cc) {
 136   switch (cc) {
 137   case TemplateTable::equal        : return Assembler::notEqual;
 138   case TemplateTable::not_equal    : return Assembler::equal;
 139   case TemplateTable::less         : return Assembler::greaterEqual;
 140   case TemplateTable::less_equal   : return Assembler::greater;
 141   case TemplateTable::greater      : return Assembler::lessEqual;
 142   case TemplateTable::greater_equal: return Assembler::less;
 143   }
 144   ShouldNotReachHere();
 145   return Assembler::zero;
 146 }
 147 
 148 
 149 
 150 // Miscellaneous helper routines
 151 // Store an oop (or null) at the address described by obj.
 152 // If val == noreg this means store a null
 153 
 154 
 155 static void do_oop_store(InterpreterMacroAssembler* _masm,
 156                          Address dst,
 157                          Register val,
 158                          DecoratorSet decorators = 0) {
 159   assert(val == noreg || val == rax, "parameter is just for looks");
 160   __ store_heap_oop(dst, val,
 161                     NOT_LP64(rdx) LP64_ONLY(rscratch2),
 162                     NOT_LP64(rbx) LP64_ONLY(r9),
 163                     NOT_LP64(rsi) LP64_ONLY(r8), decorators);
 164 }
 165 
 166 static void do_oop_load(InterpreterMacroAssembler* _masm,
 167                         Address src,
 168                         Register dst,
 169                         DecoratorSet decorators = 0) {
 170   __ load_heap_oop(dst, src, rdx, rbx, decorators);
 171 }
 172 
 173 Address TemplateTable::at_bcp(int offset) {
 174   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 175   return Address(rbcp, offset);
 176 }
 177 
 178 
 179 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 180                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 181                                    int byte_no) {
 182   if (!RewriteBytecodes)  return;
 183   Label L_patch_done;
 184 
 185   switch (bc) {
 186   case Bytecodes::_fast_vputfield:
 187   case Bytecodes::_fast_aputfield:
 188   case Bytecodes::_fast_bputfield:
 189   case Bytecodes::_fast_zputfield:
 190   case Bytecodes::_fast_cputfield:
 191   case Bytecodes::_fast_dputfield:
 192   case Bytecodes::_fast_fputfield:
 193   case Bytecodes::_fast_iputfield:
 194   case Bytecodes::_fast_lputfield:
 195   case Bytecodes::_fast_sputfield:
 196     {
 197       // We skip bytecode quickening for putfield instructions when
 198       // the put_code written to the constant pool cache is zero.
 199       // This is required so that every execution of this instruction
 200       // calls out to InterpreterRuntime::resolve_get_put to do
 201       // additional, required work.
 202       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 203       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 204       __ load_field_entry(temp_reg, bc_reg);
 205       if (byte_no == f1_byte) {
 206         __ load_unsigned_byte(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset())));
 207       } else {
 208         __ load_unsigned_byte(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset())));
 209       }
 210 
 211       __ movl(bc_reg, bc);
 212       __ cmpl(temp_reg, (int) 0);
 213       __ jcc(Assembler::zero, L_patch_done);  // don't patch
 214     }
 215     break;
 216   default:
 217     assert(byte_no == -1, "sanity");
 218     // the pair bytecodes have already done the load.
 219     if (load_bc_into_bc_reg) {
 220       __ movl(bc_reg, bc);
 221     }
 222   }
 223 
 224   if (JvmtiExport::can_post_breakpoint()) {
 225     Label L_fast_patch;
 226     // if a breakpoint is present we can't rewrite the stream directly
 227     __ movzbl(temp_reg, at_bcp(0));
 228     __ cmpl(temp_reg, Bytecodes::_breakpoint);
 229     __ jcc(Assembler::notEqual, L_fast_patch);
 230     __ get_method(temp_reg);
 231     // Let breakpoint table handling rewrite to quicker bytecode
 232     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
 233 #ifndef ASSERT
 234     __ jmpb(L_patch_done);
 235 #else
 236     __ jmp(L_patch_done);
 237 #endif
 238     __ bind(L_fast_patch);
 239   }
 240 
 241 #ifdef ASSERT
 242   Label L_okay;
 243   __ load_unsigned_byte(temp_reg, at_bcp(0));
 244   __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
 245   __ jcc(Assembler::equal, L_okay);
 246   __ cmpl(temp_reg, bc_reg);
 247   __ jcc(Assembler::equal, L_okay);
 248   __ stop("patching the wrong bytecode");
 249   __ bind(L_okay);
 250 #endif
 251 
 252   // patch bytecode
 253   __ movb(at_bcp(0), bc_reg);
 254   __ bind(L_patch_done);
 255 }
 256 // Individual instructions
 257 
 258 
 259 void TemplateTable::nop() {
 260   transition(vtos, vtos);
 261   // nothing to do
 262 }
 263 
 264 void TemplateTable::shouldnotreachhere() {
 265   transition(vtos, vtos);
 266   __ stop("shouldnotreachhere bytecode");
 267 }
 268 
 269 void TemplateTable::aconst_null() {
 270   transition(vtos, atos);
 271   __ xorl(rax, rax);
 272 }
 273 
 274 void TemplateTable::iconst(int value) {
 275   transition(vtos, itos);
 276   if (value == 0) {
 277     __ xorl(rax, rax);
 278   } else {
 279     __ movl(rax, value);
 280   }
 281 }
 282 
 283 void TemplateTable::lconst(int value) {
 284   transition(vtos, ltos);
 285   if (value == 0) {
 286     __ xorl(rax, rax);
 287   } else {
 288     __ movl(rax, value);
 289   }
 290 #ifndef _LP64
 291   assert(value >= 0, "check this code");
 292   __ xorptr(rdx, rdx);
 293 #endif
 294 }
 295 
 296 
 297 
 298 void TemplateTable::fconst(int value) {
 299   transition(vtos, ftos);
 300   if (UseSSE >= 1) {
 301     static float one = 1.0f, two = 2.0f;
 302     switch (value) {
 303     case 0:
 304       __ xorps(xmm0, xmm0);
 305       break;
 306     case 1:
 307       __ movflt(xmm0, ExternalAddress((address) &one), rscratch1);
 308       break;
 309     case 2:
 310       __ movflt(xmm0, ExternalAddress((address) &two), rscratch1);
 311       break;
 312     default:
 313       ShouldNotReachHere();
 314       break;
 315     }
 316   } else {
 317 #ifdef _LP64
 318     ShouldNotReachHere();
 319 #else
 320            if (value == 0) { __ fldz();
 321     } else if (value == 1) { __ fld1();
 322     } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
 323     } else                 { ShouldNotReachHere();
 324     }
 325 #endif // _LP64
 326   }
 327 }
 328 
 329 void TemplateTable::dconst(int value) {
 330   transition(vtos, dtos);
 331   if (UseSSE >= 2) {
 332     static double one = 1.0;
 333     switch (value) {
 334     case 0:
 335       __ xorpd(xmm0, xmm0);
 336       break;
 337     case 1:
 338       __ movdbl(xmm0, ExternalAddress((address) &one), rscratch1);
 339       break;
 340     default:
 341       ShouldNotReachHere();
 342       break;
 343     }
 344   } else {
 345 #ifdef _LP64
 346     ShouldNotReachHere();
 347 #else
 348            if (value == 0) { __ fldz();
 349     } else if (value == 1) { __ fld1();
 350     } else                 { ShouldNotReachHere();
 351     }
 352 #endif
 353   }
 354 }
 355 
 356 void TemplateTable::bipush() {
 357   transition(vtos, itos);
 358   __ load_signed_byte(rax, at_bcp(1));
 359 }
 360 
 361 void TemplateTable::sipush() {
 362   transition(vtos, itos);
 363   __ load_unsigned_short(rax, at_bcp(1));
 364   __ bswapl(rax);
 365   __ sarl(rax, 16);
 366 }
 367 
 368 void TemplateTable::ldc(LdcType type) {
 369   transition(vtos, vtos);
 370   Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 371   Label call_ldc, notFloat, notClass, notInt, Done;
 372 
 373   if (is_ldc_wide(type)) {
 374     __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 375   } else {
 376     __ load_unsigned_byte(rbx, at_bcp(1));
 377   }
 378 
 379   __ get_cpool_and_tags(rcx, rax);
 380   const int base_offset = ConstantPool::header_size() * wordSize;
 381   const int tags_offset = Array<u1>::base_offset_in_bytes();
 382 
 383   // get type
 384   __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
 385 
 386   // unresolved class - get the resolved class
 387   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
 388   __ jccb(Assembler::equal, call_ldc);
 389 
 390   // unresolved class in error state - call into runtime to throw the error
 391   // from the first resolution attempt
 392   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
 393   __ jccb(Assembler::equal, call_ldc);
 394 
 395   // resolved class - need to call vm to get java mirror of the class
 396   __ cmpl(rdx, JVM_CONSTANT_Class);
 397   __ jcc(Assembler::notEqual, notClass);
 398 
 399   __ bind(call_ldc);
 400 
 401   __ movl(rarg, is_ldc_wide(type) ? 1 : 0);
 402   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);
 403 
 404   __ push(atos);
 405   __ jmp(Done);
 406 
 407   __ bind(notClass);
 408   __ cmpl(rdx, JVM_CONSTANT_Float);
 409   __ jccb(Assembler::notEqual, notFloat);
 410 
 411   // ftos
 412   __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
 413   __ push(ftos);
 414   __ jmp(Done);
 415 
 416   __ bind(notFloat);
 417   __ cmpl(rdx, JVM_CONSTANT_Integer);
 418   __ jccb(Assembler::notEqual, notInt);
 419 
 420   // itos
 421   __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
 422   __ push(itos);
 423   __ jmp(Done);
 424 
 425   // assume the tag is for condy; if not, the VM runtime will tell us
 426   __ bind(notInt);
 427   condy_helper(Done);
 428 
 429   __ bind(Done);
 430 }
 431 
 432 // Fast path for caching oop constants.
 433 void TemplateTable::fast_aldc(LdcType type) {
 434   transition(vtos, atos);
 435 
 436   Register result = rax;
 437   Register tmp = rdx;
 438   Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 439   int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);
 440 
 441   Label resolved;
 442 
 443   // We are resolved if the resolved reference cache entry contains a
 444   // non-null object (String, MethodType, etc.)
 445   assert_different_registers(result, tmp);
 446   __ get_cache_index_at_bcp(tmp, 1, index_size);
 447   __ load_resolved_reference_at_index(result, tmp);
 448   __ testptr(result, result);
 449   __ jcc(Assembler::notZero, resolved);
 450 
 451   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 452 
 453   // first time invocation - must resolve first
 454   __ movl(rarg, (int)bytecode());
 455   __ call_VM(result, entry, rarg);
 456   __ bind(resolved);
 457 
 458   { // Check for the null sentinel.
 459     // If we just called the VM, it already did the mapping for us,
 460     // but it's harmless to retry.
 461     Label notNull;
 462     ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
 463     __ movptr(tmp, null_sentinel);
 464     __ resolve_oop_handle(tmp, rscratch2);
 465     __ cmpoop(tmp, result);
 466     __ jccb(Assembler::notEqual, notNull);
 467     __ xorptr(result, result);  // null object reference
 468     __ bind(notNull);
 469   }
 470 
 471   if (VerifyOops) {
 472     __ verify_oop(result);
 473   }
 474 }
 475 
 476 void TemplateTable::ldc2_w() {
 477   transition(vtos, vtos);
 478   Label notDouble, notLong, Done;
 479   __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 480 
 481   __ get_cpool_and_tags(rcx, rax);
 482   const int base_offset = ConstantPool::header_size() * wordSize;
 483   const int tags_offset = Array<u1>::base_offset_in_bytes();
 484 
 485   // get type
 486   __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
 487   __ cmpl(rdx, JVM_CONSTANT_Double);
 488   __ jccb(Assembler::notEqual, notDouble);
 489 
 490   // dtos
 491   __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
 492   __ push(dtos);
 493 
 494   __ jmp(Done);
 495   __ bind(notDouble);
 496   __ cmpl(rdx, JVM_CONSTANT_Long);
 497   __ jccb(Assembler::notEqual, notLong);
 498 
 499   // ltos
 500   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
 501   NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
 502   __ push(ltos);
 503   __ jmp(Done);
 504 
 505   __ bind(notLong);
 506   condy_helper(Done);
 507 
 508   __ bind(Done);
 509 }
 510 
 511 void TemplateTable::condy_helper(Label& Done) {
 512   const Register obj = rax;
 513   const Register off = rbx;
 514   const Register flags = rcx;
 515   const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 516   __ movl(rarg, (int)bytecode());
 517   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
 518 #ifndef _LP64
 519   // borrow rdi from locals
 520   __ get_thread(rdi);
 521   __ get_vm_result_2(flags, rdi);
 522   __ restore_locals();
 523 #else
 524   __ get_vm_result_2(flags, r15_thread);
 525 #endif
 526   // VMr = obj = base address to find primitive value to push
 527   // VMr2 = flags = (tos, off) using format of CPCE::_flags
 528   __ movl(off, flags);
 529   __ andl(off, ConstantPoolCache::field_index_mask);
 530   const Address field(obj, off, Address::times_1, 0*wordSize);
 531 
 532   // What sort of thing are we loading?
 533   __ shrl(flags, ConstantPoolCache::tos_state_shift);
 534   __ andl(flags, ConstantPoolCache::tos_state_mask);
 535 
 536   switch (bytecode()) {
 537   case Bytecodes::_ldc:
 538   case Bytecodes::_ldc_w:
 539     {
 540       // tos in (itos, ftos, stos, btos, ctos, ztos)
 541       Label notInt, notFloat, notShort, notByte, notChar, notBool;
 542       __ cmpl(flags, itos);
 543       __ jccb(Assembler::notEqual, notInt);
 544       // itos
 545       __ movl(rax, field);
 546       __ push(itos);
 547       __ jmp(Done);
 548 
 549       __ bind(notInt);
 550       __ cmpl(flags, ftos);
 551       __ jccb(Assembler::notEqual, notFloat);
 552       // ftos
 553       __ load_float(field);
 554       __ push(ftos);
 555       __ jmp(Done);
 556 
 557       __ bind(notFloat);
 558       __ cmpl(flags, stos);
 559       __ jccb(Assembler::notEqual, notShort);
 560       // stos
 561       __ load_signed_short(rax, field);
 562       __ push(stos);
 563       __ jmp(Done);
 564 
 565       __ bind(notShort);
 566       __ cmpl(flags, btos);
 567       __ jccb(Assembler::notEqual, notByte);
 568       // btos
 569       __ load_signed_byte(rax, field);
 570       __ push(btos);
 571       __ jmp(Done);
 572 
 573       __ bind(notByte);
 574       __ cmpl(flags, ctos);
 575       __ jccb(Assembler::notEqual, notChar);
 576       // ctos
 577       __ load_unsigned_short(rax, field);
 578       __ push(ctos);
 579       __ jmp(Done);
 580 
 581       __ bind(notChar);
 582       __ cmpl(flags, ztos);
 583       __ jccb(Assembler::notEqual, notBool);
 584       // ztos
 585       __ load_signed_byte(rax, field);
 586       __ push(ztos);
 587       __ jmp(Done);
 588 
 589       __ bind(notBool);
 590       break;
 591     }
 592 
 593   case Bytecodes::_ldc2_w:
 594     {
 595       Label notLong, notDouble;
 596       __ cmpl(flags, ltos);
 597       __ jccb(Assembler::notEqual, notLong);
 598       // ltos
 599       // Loading high word first because movptr clobbers rax
 600       NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
 601       __ movptr(rax, field);
 602       __ push(ltos);
 603       __ jmp(Done);
 604 
 605       __ bind(notLong);
 606       __ cmpl(flags, dtos);
 607       __ jccb(Assembler::notEqual, notDouble);
 608       // dtos
 609       __ load_double(field);
 610       __ push(dtos);
 611       __ jmp(Done);
 612 
 613       __ bind(notDouble);
 614       break;
 615     }
 616 
 617   default:
 618     ShouldNotReachHere();
 619   }
 620 
 621   __ stop("bad ldc/condy");
 622 }
 623 
 624 void TemplateTable::locals_index(Register reg, int offset) {
 625   __ load_unsigned_byte(reg, at_bcp(offset));
 626   __ negptr(reg);
 627 }
 628 
 629 void TemplateTable::iload() {
 630   iload_internal();
 631 }
 632 
 633 void TemplateTable::nofast_iload() {
 634   iload_internal(may_not_rewrite);
 635 }
 636 
 637 void TemplateTable::iload_internal(RewriteControl rc) {
 638   transition(vtos, itos);
 639   if (RewriteFrequentPairs && rc == may_rewrite) {
 640     Label rewrite, done;
 641     const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
 642     LP64_ONLY(assert(rbx != bc, "register damaged"));
 643 
 644     // get next byte
 645     __ load_unsigned_byte(rbx,
 646                           at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 647     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
 648     // last two iloads in a pair.  Comparing against fast_iload means that
 649     // the next bytecode is neither an iload or a caload, and therefore
 650     // an iload pair.
 651     __ cmpl(rbx, Bytecodes::_iload);
 652     __ jcc(Assembler::equal, done);
 653 
 654     __ cmpl(rbx, Bytecodes::_fast_iload);
 655     __ movl(bc, Bytecodes::_fast_iload2);
 656 
 657     __ jccb(Assembler::equal, rewrite);
 658 
 659     // if _caload, rewrite to fast_icaload
 660     __ cmpl(rbx, Bytecodes::_caload);
 661     __ movl(bc, Bytecodes::_fast_icaload);
 662     __ jccb(Assembler::equal, rewrite);
 663 
 664     // rewrite so iload doesn't check again.
 665     __ movl(bc, Bytecodes::_fast_iload);
 666 
 667     // rewrite
 668     // bc: fast bytecode
 669     __ bind(rewrite);
 670     patch_bytecode(Bytecodes::_iload, bc, rbx, false);
 671     __ bind(done);
 672   }
 673 
 674   // Get the local value into tos
 675   locals_index(rbx);
 676   __ movl(rax, iaddress(rbx));
 677 }
 678 
 679 void TemplateTable::fast_iload2() {
 680   transition(vtos, itos);
 681   locals_index(rbx);
 682   __ movl(rax, iaddress(rbx));
 683   __ push(itos);
 684   locals_index(rbx, 3);
 685   __ movl(rax, iaddress(rbx));
 686 }
 687 
 688 void TemplateTable::fast_iload() {
 689   transition(vtos, itos);
 690   locals_index(rbx);
 691   __ movl(rax, iaddress(rbx));
 692 }
 693 
 694 void TemplateTable::lload() {
 695   transition(vtos, ltos);
 696   locals_index(rbx);
 697   __ movptr(rax, laddress(rbx));
 698   NOT_LP64(__ movl(rdx, haddress(rbx)));
 699 }
 700 
 701 void TemplateTable::fload() {
 702   transition(vtos, ftos);
 703   locals_index(rbx);
 704   __ load_float(faddress(rbx));
 705 }
 706 
 707 void TemplateTable::dload() {
 708   transition(vtos, dtos);
 709   locals_index(rbx);
 710   __ load_double(daddress(rbx));
 711 }
 712 
 713 void TemplateTable::aload() {
 714   transition(vtos, atos);
 715   locals_index(rbx);
 716   __ movptr(rax, aaddress(rbx));
 717 }
 718 
 719 void TemplateTable::locals_index_wide(Register reg) {
 720   __ load_unsigned_short(reg, at_bcp(2));
 721   __ bswapl(reg);
 722   __ shrl(reg, 16);
 723   __ negptr(reg);
 724 }
 725 
 726 void TemplateTable::wide_iload() {
 727   transition(vtos, itos);
 728   locals_index_wide(rbx);
 729   __ movl(rax, iaddress(rbx));
 730 }
 731 
 732 void TemplateTable::wide_lload() {
 733   transition(vtos, ltos);
 734   locals_index_wide(rbx);
 735   __ movptr(rax, laddress(rbx));
 736   NOT_LP64(__ movl(rdx, haddress(rbx)));
 737 }
 738 
 739 void TemplateTable::wide_fload() {
 740   transition(vtos, ftos);
 741   locals_index_wide(rbx);
 742   __ load_float(faddress(rbx));
 743 }
 744 
 745 void TemplateTable::wide_dload() {
 746   transition(vtos, dtos);
 747   locals_index_wide(rbx);
 748   __ load_double(daddress(rbx));
 749 }
 750 
 751 void TemplateTable::wide_aload() {
 752   transition(vtos, atos);
 753   locals_index_wide(rbx);
 754   __ movptr(rax, aaddress(rbx));
 755 }
 756 
 757 void TemplateTable::index_check(Register array, Register index) {
 758   // Pop ptr into array
 759   __ pop_ptr(array);
 760   index_check_without_pop(array, index);
 761 }
 762 
 763 void TemplateTable::index_check_without_pop(Register array, Register index) {
 764   // destroys rbx
 765   // sign extend index for use by indexed load
 766   __ movl2ptr(index, index);
 767   // check index
 768   __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
 769   if (index != rbx) {
 770     // ??? convention: move aberrant index into rbx for exception message
 771     assert(rbx != array, "different registers");
 772     __ movl(rbx, index);
 773   }
 774   Label skip;
 775   __ jccb(Assembler::below, skip);
 776   // Pass array to create more detailed exceptions.
 777   __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
 778   __ jump(RuntimeAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
 779   __ bind(skip);
 780 }
 781 
 782 void TemplateTable::iaload() {
 783   transition(itos, itos);
 784   // rax: index
 785   // rdx: array
 786   index_check(rdx, rax); // kills rbx
 787   __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
 788                     Address(rdx, rax, Address::times_4,
 789                             arrayOopDesc::base_offset_in_bytes(T_INT)),
 790                     noreg, noreg);
 791 }
 792 
 793 void TemplateTable::laload() {
 794   transition(itos, ltos);
 795   // rax: index
 796   // rdx: array
 797   index_check(rdx, rax); // kills rbx
 798   NOT_LP64(__ mov(rbx, rax));
 799   // rbx,: index
 800   __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
 801                     Address(rdx, rbx, Address::times_8,
 802                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
 803                     noreg, noreg);
 804 }
 805 
 806 
 807 
 808 void TemplateTable::faload() {
 809   transition(itos, ftos);
 810   // rax: index
 811   // rdx: array
 812   index_check(rdx, rax); // kills rbx
 813   __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
 814                     Address(rdx, rax,
 815                             Address::times_4,
 816                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
 817                     noreg, noreg);
 818 }
 819 
 820 void TemplateTable::daload() {
 821   transition(itos, dtos);
 822   // rax: index
 823   // rdx: array
 824   index_check(rdx, rax); // kills rbx
 825   __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
 826                     Address(rdx, rax,
 827                             Address::times_8,
 828                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
 829                     noreg, noreg);
 830 }
 831 
 832 void TemplateTable::aaload() {
 833   transition(itos, atos);
 834   Register array = rdx;
 835   Register index = rax;
 836 
 837   index_check(array, index); // kills rbx
 838   __ profile_array_type<ArrayLoadData>(rbx, array, rcx);
 839   if (UseArrayFlattening) {
 840     Label is_flat_array, done;
 841     __ test_flat_array_oop(array, rbx, is_flat_array);
 842     do_oop_load(_masm,
 843                 Address(array, index,
 844                         UseCompressedOops ? Address::times_4 : Address::times_ptr,
 845                         arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
 846                 rax,
 847                 IS_ARRAY);
 848     __ jmp(done);
 849     __ bind(is_flat_array);
 850     __ movptr(rcx, array);
 851     call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_load), rcx, index);
 852     __ bind(done);
 853   } else {
 854     do_oop_load(_masm,
 855                 Address(array, index,
 856                         UseCompressedOops ? Address::times_4 : Address::times_ptr,
 857                         arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
 858                 rax,
 859                 IS_ARRAY);
 860   }
 861   __ profile_element_type(rbx, rax, rcx);
 862 }
 863 
 864 void TemplateTable::baload() {
 865   transition(itos, itos);
 866   // rax: index
 867   // rdx: array
 868   index_check(rdx, rax); // kills rbx
 869   __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
 870                     Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
 871                     noreg, noreg);
 872 }
 873 
 874 void TemplateTable::caload() {
 875   transition(itos, itos);
 876   // rax: index
 877   // rdx: array
 878   index_check(rdx, rax); // kills rbx
 879   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
 880                     Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
 881                     noreg, noreg);
 882 }
 883 
 884 // iload followed by caload frequent pair
 885 void TemplateTable::fast_icaload() {
 886   transition(vtos, itos);
 887   // load index out of locals
 888   locals_index(rbx);
 889   __ movl(rax, iaddress(rbx));
 890 
 891   // rax: index
 892   // rdx: array
 893   index_check(rdx, rax); // kills rbx
 894   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
 895                     Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
 896                     noreg, noreg);
 897 }
 898 
 899 
 900 void TemplateTable::saload() {
 901   transition(itos, itos);
 902   // rax: index
 903   // rdx: array
 904   index_check(rdx, rax); // kills rbx
 905   __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
 906                     Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
 907                     noreg, noreg);
 908 }
 909 
 910 void TemplateTable::iload(int n) {
 911   transition(vtos, itos);
 912   __ movl(rax, iaddress(n));
 913 }
 914 
 915 void TemplateTable::lload(int n) {
 916   transition(vtos, ltos);
 917   __ movptr(rax, laddress(n));
 918   NOT_LP64(__ movptr(rdx, haddress(n)));
 919 }
 920 
 921 void TemplateTable::fload(int n) {
 922   transition(vtos, ftos);
 923   __ load_float(faddress(n));
 924 }
 925 
 926 void TemplateTable::dload(int n) {
 927   transition(vtos, dtos);
 928   __ load_double(daddress(n));
 929 }
 930 
 931 void TemplateTable::aload(int n) {
 932   transition(vtos, atos);
 933   __ movptr(rax, aaddress(n));
 934 }
 935 
 936 void TemplateTable::aload_0() {
 937   aload_0_internal();
 938 }
 939 
 940 void TemplateTable::nofast_aload_0() {
 941   aload_0_internal(may_not_rewrite);
 942 }
 943 
 944 void TemplateTable::aload_0_internal(RewriteControl rc) {
 945   transition(vtos, atos);
 946   // According to bytecode histograms, the pairs:
 947   //
 948   // _aload_0, _fast_igetfield
 949   // _aload_0, _fast_agetfield
 950   // _aload_0, _fast_fgetfield
 951   //
 952   // occur frequently. If RewriteFrequentPairs is set, the (slow)
 953   // _aload_0 bytecode checks if the next bytecode is either
 954   // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
 955   // rewrites the current bytecode into a pair bytecode; otherwise it
 956   // rewrites the current bytecode into _fast_aload_0 that doesn't do
 957   // the pair check anymore.
 958   //
 959   // Note: If the next bytecode is _getfield, the rewrite must be
 960   //       delayed, otherwise we may miss an opportunity for a pair.
 961   //
 962   // Also rewrite frequent pairs
 963   //   aload_0, aload_1
 964   //   aload_0, iload_1
 965   // These bytecodes with a small amount of code are most profitable
 966   // to rewrite
 967   if (RewriteFrequentPairs && rc == may_rewrite) {
 968     Label rewrite, done;
 969 
 970     const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
 971     LP64_ONLY(assert(rbx != bc, "register damaged"));
 972 
 973     // get next byte
 974     __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 975 
 976     // if _getfield then wait with rewrite
 977     __ cmpl(rbx, Bytecodes::_getfield);
 978     __ jcc(Assembler::equal, done);
 979 
 980     // if _igetfield then rewrite to _fast_iaccess_0
 981     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 982     __ cmpl(rbx, Bytecodes::_fast_igetfield);
 983     __ movl(bc, Bytecodes::_fast_iaccess_0);
 984     __ jccb(Assembler::equal, rewrite);
 985 
 986     // if _agetfield then rewrite to _fast_aaccess_0
 987     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 988     __ cmpl(rbx, Bytecodes::_fast_agetfield);
 989     __ movl(bc, Bytecodes::_fast_aaccess_0);
 990     __ jccb(Assembler::equal, rewrite);
 991 
 992     // if _fgetfield then rewrite to _fast_faccess_0
 993     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 994     __ cmpl(rbx, Bytecodes::_fast_fgetfield);
 995     __ movl(bc, Bytecodes::_fast_faccess_0);
 996     __ jccb(Assembler::equal, rewrite);
 997 
 998     // else rewrite to _fast_aload0
 999     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1000     __ movl(bc, Bytecodes::_fast_aload_0);
1001 
1002     // rewrite
1003     // bc: fast bytecode
1004     __ bind(rewrite);
1005     patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
1006 
1007     __ bind(done);
1008   }
1009 
1010   // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
1011   aload(0);
1012 }
1013 
1014 void TemplateTable::istore() {
1015   transition(itos, vtos);
1016   locals_index(rbx);
1017   __ movl(iaddress(rbx), rax);
1018 }
1019 
1020 
1021 void TemplateTable::lstore() {
1022   transition(ltos, vtos);
1023   locals_index(rbx);
1024   __ movptr(laddress(rbx), rax);
1025   NOT_LP64(__ movptr(haddress(rbx), rdx));
1026 }
1027 
1028 void TemplateTable::fstore() {
1029   transition(ftos, vtos);
1030   locals_index(rbx);
1031   __ store_float(faddress(rbx));
1032 }
1033 
1034 void TemplateTable::dstore() {
1035   transition(dtos, vtos);
1036   locals_index(rbx);
1037   __ store_double(daddress(rbx));
1038 }
1039 
1040 void TemplateTable::astore() {
1041   transition(vtos, vtos);
1042   __ pop_ptr(rax);
1043   locals_index(rbx);
1044   __ movptr(aaddress(rbx), rax);
1045 }
1046 
1047 void TemplateTable::wide_istore() {
1048   transition(vtos, vtos);
1049   __ pop_i();
1050   locals_index_wide(rbx);
1051   __ movl(iaddress(rbx), rax);
1052 }
1053 
1054 void TemplateTable::wide_lstore() {
1055   transition(vtos, vtos);
1056   NOT_LP64(__ pop_l(rax, rdx));
1057   LP64_ONLY(__ pop_l());
1058   locals_index_wide(rbx);
1059   __ movptr(laddress(rbx), rax);
1060   NOT_LP64(__ movl(haddress(rbx), rdx));
1061 }
1062 
1063 void TemplateTable::wide_fstore() {
1064 #ifdef _LP64
1065   transition(vtos, vtos);
1066   __ pop_f(xmm0);
1067   locals_index_wide(rbx);
1068   __ movflt(faddress(rbx), xmm0);
1069 #else
1070   wide_istore();
1071 #endif
1072 }
1073 
1074 void TemplateTable::wide_dstore() {
1075 #ifdef _LP64
1076   transition(vtos, vtos);
1077   __ pop_d(xmm0);
1078   locals_index_wide(rbx);
1079   __ movdbl(daddress(rbx), xmm0);
1080 #else
1081   wide_lstore();
1082 #endif
1083 }
1084 
1085 void TemplateTable::wide_astore() {
1086   transition(vtos, vtos);
1087   __ pop_ptr(rax);
1088   locals_index_wide(rbx);
1089   __ movptr(aaddress(rbx), rax);
1090 }
1091 
1092 void TemplateTable::iastore() {
1093   transition(itos, vtos);
1094   __ pop_i(rbx);
1095   // rax: value
1096   // rbx: index
1097   // rdx: array
1098   index_check(rdx, rbx); // prefer index in rbx
1099   __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
1100                      Address(rdx, rbx, Address::times_4,
1101                              arrayOopDesc::base_offset_in_bytes(T_INT)),
1102                      rax, noreg, noreg, noreg);
1103 }
1104 
1105 void TemplateTable::lastore() {
1106   transition(ltos, vtos);
1107   __ pop_i(rbx);
1108   // rax,: low(value)
1109   // rcx: array
1110   // rdx: high(value)
1111   index_check(rcx, rbx);  // prefer index in rbx,
1112   // rbx,: index
1113   __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
1114                      Address(rcx, rbx, Address::times_8,
1115                              arrayOopDesc::base_offset_in_bytes(T_LONG)),
1116                      noreg /* ltos */, noreg, noreg, noreg);
1117 }
1118 
1119 
1120 void TemplateTable::fastore() {
1121   transition(ftos, vtos);
1122   __ pop_i(rbx);
1123   // value is in UseSSE >= 1 ? xmm0 : ST(0)
1124   // rbx:  index
1125   // rdx:  array
1126   index_check(rdx, rbx); // prefer index in rbx
1127   __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
1128                      Address(rdx, rbx, Address::times_4,
1129                              arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
1130                      noreg /* ftos */, noreg, noreg, noreg);
1131 }
1132 
1133 void TemplateTable::dastore() {
1134   transition(dtos, vtos);
1135   __ pop_i(rbx);
1136   // value is in UseSSE >= 2 ? xmm0 : ST(0)
1137   // rbx:  index
1138   // rdx:  array
1139   index_check(rdx, rbx); // prefer index in rbx
1140   __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
1141                      Address(rdx, rbx, Address::times_8,
1142                              arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
1143                      noreg /* dtos */, noreg, noreg, noreg);
1144 }
1145 
1146 void TemplateTable::aastore() {
1147   Label is_null, is_flat_array, ok_is_subtype, done;
1148   transition(vtos, vtos);
1149   // stack: ..., array, index, value
1150   __ movptr(rax, at_tos());    // value
1151   __ movl(rcx, at_tos_p1()); // index
1152   __ movptr(rdx, at_tos_p2()); // array
1153 
1154   Address element_address(rdx, rcx,
1155                           UseCompressedOops? Address::times_4 : Address::times_ptr,
1156                           arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1157 
1158   index_check_without_pop(rdx, rcx);     // kills rbx
1159 
1160   __ profile_array_type<ArrayStoreData>(rdi, rdx, rbx);
1161   __ profile_multiple_element_types(rdi, rax, rbx, rcx);
1162 
1163   __ testptr(rax, rax);
1164   __ jcc(Assembler::zero, is_null);
1165 
1166   // Move array class to rdi
1167   __ load_klass(rdi, rdx, rscratch1);
1168   if (UseArrayFlattening) {
1169     __ movl(rbx, Address(rdi, Klass::layout_helper_offset()));
1170     __ test_flat_array_layout(rbx, is_flat_array);
1171   }
1172 
1173   // Move subklass into rbx
1174   __ load_klass(rbx, rax, rscratch1);
1175   // Move array element superklass into rax
1176   __ movptr(rax, Address(rdi,
1177                          ObjArrayKlass::element_klass_offset()));
1178 
1179   // Generate subtype check.  Blows rcx, rdi
1180   // Superklass in rax.  Subklass in rbx.
1181   // is "rbx <: rax" ? (value subclass <: array element superclass)
1182   __ gen_subtype_check(rbx, ok_is_subtype, false);
1183 
1184   // Come here on failure
1185   // object is at TOS
1186   __ jump(RuntimeAddress(Interpreter::_throw_ArrayStoreException_entry));
1187 
1188   // Come here on success
1189   __ bind(ok_is_subtype);
1190 
1191   // Get the value we will store
1192   __ movptr(rax, at_tos());
1193   __ movl(rcx, at_tos_p1()); // index
1194   // Now store using the appropriate barrier
1195   do_oop_store(_masm, element_address, rax, IS_ARRAY);
1196   __ jmp(done);
1197 
1198   // Have a null in rax, rdx=array, ecx=index.  Store null at ary[idx]
1199   __ bind(is_null);
1200   if (EnableValhalla) {
1201     Label is_null_into_value_array_npe, store_null;
1202 
1203       // Move array class to rdi
1204     __ load_klass(rdi, rdx, rscratch1);
1205     if (UseArrayFlattening) {
1206       __ movl(rbx, Address(rdi, Klass::layout_helper_offset()));
1207       __ test_flat_array_layout(rbx, is_flat_array);
1208     }
1209 
1210     // No way to store null in null-free array
1211     __ test_null_free_array_oop(rdx, rbx, is_null_into_value_array_npe);
1212     __ jmp(store_null);
1213 
1214     __ bind(is_null_into_value_array_npe);
1215     __ jump(RuntimeAddress(Interpreter::_throw_NullPointerException_entry));
1216 
1217     __ bind(store_null);
1218   }
1219   // Store a null
1220   do_oop_store(_masm, element_address, noreg, IS_ARRAY);
1221   __ jmp(done);
1222 
1223   if (UseArrayFlattening) {
1224     Label is_type_ok;
1225     __ bind(is_flat_array); // Store non-null value to flat
1226 
1227     __ movptr(rax, at_tos());
1228     __ movl(rcx, at_tos_p1()); // index
1229     __ movptr(rdx, at_tos_p2()); // array
1230 
1231     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_store), rax, rdx, rcx);
1232   }
1233   // Pop stack arguments
1234   __ bind(done);
1235   __ addptr(rsp, 3 * Interpreter::stackElementSize);
1236 }
1237 
1238 void TemplateTable::bastore() {
1239   transition(itos, vtos);
1240   __ pop_i(rbx);
1241   // rax: value
1242   // rbx: index
1243   // rdx: array
1244   index_check(rdx, rbx); // prefer index in rbx
1245   // Need to check whether array is boolean or byte
1246   // since both types share the bastore bytecode.
1247   __ load_klass(rcx, rdx, rscratch1);
1248   __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
1249   int diffbit = Klass::layout_helper_boolean_diffbit();
1250   __ testl(rcx, diffbit);
1251   Label L_skip;
1252   __ jccb(Assembler::zero, L_skip);
1253   __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
1254   __ bind(L_skip);
1255   __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
1256                      Address(rdx, rbx,Address::times_1,
1257                              arrayOopDesc::base_offset_in_bytes(T_BYTE)),
1258                      rax, noreg, noreg, noreg);
1259 }
1260 
1261 void TemplateTable::castore() {
1262   transition(itos, vtos);
1263   __ pop_i(rbx);
1264   // rax: value
1265   // rbx: index
1266   // rdx: array
1267   index_check(rdx, rbx);  // prefer index in rbx
1268   __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
1269                      Address(rdx, rbx, Address::times_2,
1270                              arrayOopDesc::base_offset_in_bytes(T_CHAR)),
1271                      rax, noreg, noreg, noreg);
1272 }
1273 
1274 
1275 void TemplateTable::sastore() {
1276   castore();
1277 }
1278 
1279 void TemplateTable::istore(int n) {
1280   transition(itos, vtos);
1281   __ movl(iaddress(n), rax);
1282 }
1283 
1284 void TemplateTable::lstore(int n) {
1285   transition(ltos, vtos);
1286   __ movptr(laddress(n), rax);
1287   NOT_LP64(__ movptr(haddress(n), rdx));
1288 }
1289 
1290 void TemplateTable::fstore(int n) {
1291   transition(ftos, vtos);
1292   __ store_float(faddress(n));
1293 }
1294 
1295 void TemplateTable::dstore(int n) {
1296   transition(dtos, vtos);
1297   __ store_double(daddress(n));
1298 }
1299 
1300 
1301 void TemplateTable::astore(int n) {
1302   transition(vtos, vtos);
1303   __ pop_ptr(rax);
1304   __ movptr(aaddress(n), rax);
1305 }
1306 
1307 void TemplateTable::pop() {
1308   transition(vtos, vtos);
1309   __ addptr(rsp, Interpreter::stackElementSize);
1310 }
1311 
1312 void TemplateTable::pop2() {
1313   transition(vtos, vtos);
1314   __ addptr(rsp, 2 * Interpreter::stackElementSize);
1315 }
1316 
1317 
1318 void TemplateTable::dup() {
1319   transition(vtos, vtos);
1320   __ load_ptr(0, rax);
1321   __ push_ptr(rax);
1322   // stack: ..., a, a
1323 }
1324 
1325 void TemplateTable::dup_x1() {
1326   transition(vtos, vtos);
1327   // stack: ..., a, b
1328   __ load_ptr( 0, rax);  // load b
1329   __ load_ptr( 1, rcx);  // load a
1330   __ store_ptr(1, rax);  // store b
1331   __ store_ptr(0, rcx);  // store a
1332   __ push_ptr(rax);      // push b
1333   // stack: ..., b, a, b
1334 }
1335 
1336 void TemplateTable::dup_x2() {
1337   transition(vtos, vtos);
1338   // stack: ..., a, b, c
1339   __ load_ptr( 0, rax);  // load c
1340   __ load_ptr( 2, rcx);  // load a
1341   __ store_ptr(2, rax);  // store c in a
1342   __ push_ptr(rax);      // push c
1343   // stack: ..., c, b, c, c
1344   __ load_ptr( 2, rax);  // load b
1345   __ store_ptr(2, rcx);  // store a in b
1346   // stack: ..., c, a, c, c
1347   __ store_ptr(1, rax);  // store b in c
1348   // stack: ..., c, a, b, c
1349 }
1350 
1351 void TemplateTable::dup2() {
1352   transition(vtos, vtos);
1353   // stack: ..., a, b
1354   __ load_ptr(1, rax);  // load a
1355   __ push_ptr(rax);     // push a
1356   __ load_ptr(1, rax);  // load b
1357   __ push_ptr(rax);     // push b
1358   // stack: ..., a, b, a, b
1359 }
1360 
1361 
1362 void TemplateTable::dup2_x1() {
1363   transition(vtos, vtos);
1364   // stack: ..., a, b, c
1365   __ load_ptr( 0, rcx);  // load c
1366   __ load_ptr( 1, rax);  // load b
1367   __ push_ptr(rax);      // push b
1368   __ push_ptr(rcx);      // push c
1369   // stack: ..., a, b, c, b, c
1370   __ store_ptr(3, rcx);  // store c in b
1371   // stack: ..., a, c, c, b, c
1372   __ load_ptr( 4, rcx);  // load a
1373   __ store_ptr(2, rcx);  // store a in 2nd c
1374   // stack: ..., a, c, a, b, c
1375   __ store_ptr(4, rax);  // store b in a
1376   // stack: ..., b, c, a, b, c
1377 }
1378 
1379 void TemplateTable::dup2_x2() {
1380   transition(vtos, vtos);
1381   // stack: ..., a, b, c, d
1382   __ load_ptr( 0, rcx);  // load d
1383   __ load_ptr( 1, rax);  // load c
1384   __ push_ptr(rax);      // push c
1385   __ push_ptr(rcx);      // push d
1386   // stack: ..., a, b, c, d, c, d
1387   __ load_ptr( 4, rax);  // load b
1388   __ store_ptr(2, rax);  // store b in d
1389   __ store_ptr(4, rcx);  // store d in b
1390   // stack: ..., a, d, c, b, c, d
1391   __ load_ptr( 5, rcx);  // load a
1392   __ load_ptr( 3, rax);  // load c
1393   __ store_ptr(3, rcx);  // store a in c
1394   __ store_ptr(5, rax);  // store c in a
1395   // stack: ..., c, d, a, b, c, d
1396 }
1397 
1398 void TemplateTable::swap() {
1399   transition(vtos, vtos);
1400   // stack: ..., a, b
1401   __ load_ptr( 1, rcx);  // load a
1402   __ load_ptr( 0, rax);  // load b
1403   __ store_ptr(0, rcx);  // store a in b
1404   __ store_ptr(1, rax);  // store b in a
1405   // stack: ..., b, a
1406 }
1407 
1408 void TemplateTable::iop2(Operation op) {
1409   transition(itos, itos);
1410   switch (op) {
1411   case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
1412   case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1413   case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
1414   case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
1415   case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
1416   case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
1417   case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
1418   case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
1419   case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
1420   default   : ShouldNotReachHere();
1421   }
1422 }
1423 
1424 void TemplateTable::lop2(Operation op) {
1425   transition(ltos, ltos);
1426 #ifdef _LP64
1427   switch (op) {
1428   case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
1429   case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
1430   case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
1431   case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
1432   case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
1433   default   : ShouldNotReachHere();
1434   }
1435 #else
1436   __ pop_l(rbx, rcx);
1437   switch (op) {
1438     case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1439     case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1440                 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1441     case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1442     case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1443     case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1444     default   : ShouldNotReachHere();
1445   }
1446 #endif
1447 }
1448 
1449 void TemplateTable::idiv() {
1450   transition(itos, itos);
1451   __ movl(rcx, rax);
1452   __ pop_i(rax);
1453   // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1454   //       they are not equal, one could do a normal division (no correction
1455   //       needed), which may speed up this implementation for the common case.
1456   //       (see also JVM spec., p.243 & p.271)
1457   __ corrected_idivl(rcx);
1458 }
1459 
1460 void TemplateTable::irem() {
1461   transition(itos, itos);
1462   __ movl(rcx, rax);
1463   __ pop_i(rax);
1464   // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1465   //       they are not equal, one could do a normal division (no correction
1466   //       needed), which may speed up this implementation for the common case.
1467   //       (see also JVM spec., p.243 & p.271)
1468   __ corrected_idivl(rcx);
1469   __ movl(rax, rdx);
1470 }
1471 
1472 void TemplateTable::lmul() {
1473   transition(ltos, ltos);
1474 #ifdef _LP64
1475   __ pop_l(rdx);
1476   __ imulq(rax, rdx);
1477 #else
1478   __ pop_l(rbx, rcx);
1479   __ push(rcx); __ push(rbx);
1480   __ push(rdx); __ push(rax);
1481   __ lmul(2 * wordSize, 0);
1482   __ addptr(rsp, 4 * wordSize);  // take off temporaries
1483 #endif
1484 }
1485 
1486 void TemplateTable::ldiv() {
1487   transition(ltos, ltos);
1488 #ifdef _LP64
1489   __ mov(rcx, rax);
1490   __ pop_l(rax);
1491   // generate explicit div0 check
1492   __ testq(rcx, rcx);
1493   __ jump_cc(Assembler::zero,
1494              RuntimeAddress(Interpreter::_throw_ArithmeticException_entry));
1495   // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1496   //       they are not equal, one could do a normal division (no correction
1497   //       needed), which may speed up this implementation for the common case.
1498   //       (see also JVM spec., p.243 & p.271)
1499   __ corrected_idivq(rcx); // kills rbx
1500 #else
1501   __ pop_l(rbx, rcx);
1502   __ push(rcx); __ push(rbx);
1503   __ push(rdx); __ push(rax);
1504   // check if y = 0
1505   __ orl(rax, rdx);
1506   __ jump_cc(Assembler::zero,
1507              RuntimeAddress(Interpreter::_throw_ArithmeticException_entry));
1508   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1509   __ addptr(rsp, 4 * wordSize);  // take off temporaries
1510 #endif
1511 }
1512 
1513 void TemplateTable::lrem() {
1514   transition(ltos, ltos);
1515 #ifdef _LP64
1516   __ mov(rcx, rax);
1517   __ pop_l(rax);
1518   __ testq(rcx, rcx);
1519   __ jump_cc(Assembler::zero,
1520              RuntimeAddress(Interpreter::_throw_ArithmeticException_entry));
1521   // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1522   //       they are not equal, one could do a normal division (no correction
1523   //       needed), which may speed up this implementation for the common case.
1524   //       (see also JVM spec., p.243 & p.271)
1525   __ corrected_idivq(rcx); // kills rbx
1526   __ mov(rax, rdx);
1527 #else
1528   __ pop_l(rbx, rcx);
1529   __ push(rcx); __ push(rbx);
1530   __ push(rdx); __ push(rax);
1531   // check if y = 0
1532   __ orl(rax, rdx);
1533   __ jump_cc(Assembler::zero,
1534              RuntimeAddress(Interpreter::_throw_ArithmeticException_entry));
1535   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1536   __ addptr(rsp, 4 * wordSize);
1537 #endif
1538 }
1539 
1540 void TemplateTable::lshl() {
1541   transition(itos, ltos);
1542   __ movl(rcx, rax);                             // get shift count
1543   #ifdef _LP64
1544   __ pop_l(rax);                                 // get shift value
1545   __ shlq(rax);
1546 #else
1547   __ pop_l(rax, rdx);                            // get shift value
1548   __ lshl(rdx, rax);
1549 #endif
1550 }
1551 
1552 void TemplateTable::lshr() {
1553 #ifdef _LP64
1554   transition(itos, ltos);
1555   __ movl(rcx, rax);                             // get shift count
1556   __ pop_l(rax);                                 // get shift value
1557   __ sarq(rax);
1558 #else
1559   transition(itos, ltos);
1560   __ mov(rcx, rax);                              // get shift count
1561   __ pop_l(rax, rdx);                            // get shift value
1562   __ lshr(rdx, rax, true);
1563 #endif
1564 }
1565 
1566 void TemplateTable::lushr() {
1567   transition(itos, ltos);
1568 #ifdef _LP64
1569   __ movl(rcx, rax);                             // get shift count
1570   __ pop_l(rax);                                 // get shift value
1571   __ shrq(rax);
1572 #else
1573   __ mov(rcx, rax);                              // get shift count
1574   __ pop_l(rax, rdx);                            // get shift value
1575   __ lshr(rdx, rax);
1576 #endif
1577 }
1578 
1579 void TemplateTable::fop2(Operation op) {
1580   transition(ftos, ftos);
1581 
1582   if (UseSSE >= 1) {
1583     switch (op) {
1584     case add:
1585       __ addss(xmm0, at_rsp());
1586       __ addptr(rsp, Interpreter::stackElementSize);
1587       break;
1588     case sub:
1589       __ movflt(xmm1, xmm0);
1590       __ pop_f(xmm0);
1591       __ subss(xmm0, xmm1);
1592       break;
1593     case mul:
1594       __ mulss(xmm0, at_rsp());
1595       __ addptr(rsp, Interpreter::stackElementSize);
1596       break;
1597     case div:
1598       __ movflt(xmm1, xmm0);
1599       __ pop_f(xmm0);
1600       __ divss(xmm0, xmm1);
1601       break;
1602     case rem:
1603       // On x86_64 platforms the SharedRuntime::frem method is called to perform the
1604       // modulo operation. The frem method calls the function
1605       // double fmod(double x, double y) in math.h. The documentation of fmod states:
1606       // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
1607       // (signalling or quiet) is returned.
1608       //
1609       // On x86_32 platforms the FPU is used to perform the modulo operation. The
1610       // reason is that on 32-bit Windows the sign of modulo operations diverges from
1611       // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f (and not -0.0f).
1612       // The fprem instruction used on x86_32 is functionally equivalent to
1613       // SharedRuntime::frem in that it returns a NaN.
1614 #ifdef _LP64
1615       __ movflt(xmm1, xmm0);
1616       __ pop_f(xmm0);
1617       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
1618 #else // !_LP64
1619       __ push_f(xmm0);
1620       __ pop_f();
1621       __ fld_s(at_rsp());
1622       __ fremr(rax);
1623       __ f2ieee();
1624       __ pop(rax);  // pop second operand off the stack
1625       __ push_f();
1626       __ pop_f(xmm0);
1627 #endif // _LP64
1628       break;
1629     default:
1630       ShouldNotReachHere();
1631       break;
1632     }
1633   } else {
1634 #ifdef _LP64
1635     ShouldNotReachHere();
1636 #else // !_LP64
1637     switch (op) {
1638     case add: __ fadd_s (at_rsp());                break;
1639     case sub: __ fsubr_s(at_rsp());                break;
1640     case mul: __ fmul_s (at_rsp());                break;
1641     case div: __ fdivr_s(at_rsp());                break;
1642     case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
1643     default : ShouldNotReachHere();
1644     }
1645     __ f2ieee();
1646     __ pop(rax);  // pop second operand off the stack
1647 #endif // _LP64
1648   }
1649 }
1650 
1651 void TemplateTable::dop2(Operation op) {
1652   transition(dtos, dtos);
1653   if (UseSSE >= 2) {
1654     switch (op) {
1655     case add:
1656       __ addsd(xmm0, at_rsp());
1657       __ addptr(rsp, 2 * Interpreter::stackElementSize);
1658       break;
1659     case sub:
1660       __ movdbl(xmm1, xmm0);
1661       __ pop_d(xmm0);
1662       __ subsd(xmm0, xmm1);
1663       break;
1664     case mul:
1665       __ mulsd(xmm0, at_rsp());
1666       __ addptr(rsp, 2 * Interpreter::stackElementSize);
1667       break;
1668     case div:
1669       __ movdbl(xmm1, xmm0);
1670       __ pop_d(xmm0);
1671       __ divsd(xmm0, xmm1);
1672       break;
1673     case rem:
1674       // Similar to fop2(), the modulo operation is performed using the
1675       // SharedRuntime::drem method (on x86_64 platforms) or using the
1676       // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
1677 #ifdef _LP64
1678       __ movdbl(xmm1, xmm0);
1679       __ pop_d(xmm0);
1680       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
1681 #else // !_LP64
1682       __ push_d(xmm0);
1683       __ pop_d();
1684       __ fld_d(at_rsp());
1685       __ fremr(rax);
1686       __ d2ieee();
1687       __ pop(rax);
1688       __ pop(rdx);
1689       __ push_d();
1690       __ pop_d(xmm0);
1691 #endif // _LP64
1692       break;
1693     default:
1694       ShouldNotReachHere();
1695       break;
1696     }
1697   } else {
1698 #ifdef _LP64
1699     ShouldNotReachHere();
1700 #else // !_LP64
1701     switch (op) {
1702     case add: __ fadd_d (at_rsp());                break;
1703     case sub: __ fsubr_d(at_rsp());                break;
1704     case mul: {
1705       // strict semantics
1706       __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
1707       __ fmulp();
1708       __ fmul_d (at_rsp());
1709       __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
1710       __ fmulp();
1711       break;
1712     }
1713     case div: {
1714       // strict semantics
1715       __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
1716       __ fmul_d (at_rsp());
1717       __ fdivrp();
1718       __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
1719       __ fmulp();
1720       break;
1721     }
1722     case rem: __ fld_d  (at_rsp()); __ fremr(rax); break;
1723     default : ShouldNotReachHere();
1724     }
1725     __ d2ieee();
1726     // Pop double precision number from rsp.
1727     __ pop(rax);
1728     __ pop(rdx);
1729 #endif // _LP64
1730   }
1731 }
1732 
1733 void TemplateTable::ineg() {
1734   transition(itos, itos);
1735   __ negl(rax);
1736 }
1737 
1738 void TemplateTable::lneg() {
1739   transition(ltos, ltos);
1740   LP64_ONLY(__ negq(rax));
1741   NOT_LP64(__ lneg(rdx, rax));
1742 }
1743 
1744 // Note: 'double' and 'long long' have 32-bits alignment on x86.
1745 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1746   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
1747   // of 128-bits operands for SSE instructions.
1748   jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1749   // Store the value to a 128-bits operand.
1750   operand[0] = lo;
1751   operand[1] = hi;
1752   return operand;
1753 }
1754 
1755 // Buffer for 128-bits masks used by SSE instructions.
1756 static jlong float_signflip_pool[2*2];
1757 static jlong double_signflip_pool[2*2];
1758 
1759 void TemplateTable::fneg() {
1760   transition(ftos, ftos);
1761   if (UseSSE >= 1) {
1762     static jlong *float_signflip  = double_quadword(&float_signflip_pool[1],  CONST64(0x8000000080000000),  CONST64(0x8000000080000000));
1763     __ xorps(xmm0, ExternalAddress((address) float_signflip), rscratch1);
1764   } else {
1765     LP64_ONLY(ShouldNotReachHere());
1766     NOT_LP64(__ fchs());
1767   }
1768 }
1769 
1770 void TemplateTable::dneg() {
1771   transition(dtos, dtos);
1772   if (UseSSE >= 2) {
1773     static jlong *double_signflip =
1774       double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
1775     __ xorpd(xmm0, ExternalAddress((address) double_signflip), rscratch1);
1776   } else {
1777 #ifdef _LP64
1778     ShouldNotReachHere();
1779 #else
1780     __ fchs();
1781 #endif
1782   }
1783 }
1784 
1785 void TemplateTable::iinc() {
1786   transition(vtos, vtos);
1787   __ load_signed_byte(rdx, at_bcp(2)); // get constant
1788   locals_index(rbx);
1789   __ addl(iaddress(rbx), rdx);
1790 }
1791 
1792 void TemplateTable::wide_iinc() {
1793   transition(vtos, vtos);
1794   __ movl(rdx, at_bcp(4)); // get constant
1795   locals_index_wide(rbx);
1796   __ bswapl(rdx); // swap bytes & sign-extend constant
1797   __ sarl(rdx, 16);
1798   __ addl(iaddress(rbx), rdx);
1799   // Note: should probably use only one movl to get both
1800   //       the index and the constant -> fix this
1801 }
1802 
1803 void TemplateTable::convert() {
1804 #ifdef _LP64
1805   // Checking
1806 #ifdef ASSERT
1807   {
1808     TosState tos_in  = ilgl;
1809     TosState tos_out = ilgl;
1810     switch (bytecode()) {
1811     case Bytecodes::_i2l: // fall through
1812     case Bytecodes::_i2f: // fall through
1813     case Bytecodes::_i2d: // fall through
1814     case Bytecodes::_i2b: // fall through
1815     case Bytecodes::_i2c: // fall through
1816     case Bytecodes::_i2s: tos_in = itos; break;
1817     case Bytecodes::_l2i: // fall through
1818     case Bytecodes::_l2f: // fall through
1819     case Bytecodes::_l2d: tos_in = ltos; break;
1820     case Bytecodes::_f2i: // fall through
1821     case Bytecodes::_f2l: // fall through
1822     case Bytecodes::_f2d: tos_in = ftos; break;
1823     case Bytecodes::_d2i: // fall through
1824     case Bytecodes::_d2l: // fall through
1825     case Bytecodes::_d2f: tos_in = dtos; break;
1826     default             : ShouldNotReachHere();
1827     }
1828     switch (bytecode()) {
1829     case Bytecodes::_l2i: // fall through
1830     case Bytecodes::_f2i: // fall through
1831     case Bytecodes::_d2i: // fall through
1832     case Bytecodes::_i2b: // fall through
1833     case Bytecodes::_i2c: // fall through
1834     case Bytecodes::_i2s: tos_out = itos; break;
1835     case Bytecodes::_i2l: // fall through
1836     case Bytecodes::_f2l: // fall through
1837     case Bytecodes::_d2l: tos_out = ltos; break;
1838     case Bytecodes::_i2f: // fall through
1839     case Bytecodes::_l2f: // fall through
1840     case Bytecodes::_d2f: tos_out = ftos; break;
1841     case Bytecodes::_i2d: // fall through
1842     case Bytecodes::_l2d: // fall through
1843     case Bytecodes::_f2d: tos_out = dtos; break;
1844     default             : ShouldNotReachHere();
1845     }
1846     transition(tos_in, tos_out);
1847   }
1848 #endif // ASSERT
1849 
1850   static const int64_t is_nan = 0x8000000000000000L;
1851 
1852   // Conversion
1853   switch (bytecode()) {
1854   case Bytecodes::_i2l:
1855     __ movslq(rax, rax);
1856     break;
1857   case Bytecodes::_i2f:
1858     __ cvtsi2ssl(xmm0, rax);
1859     break;
1860   case Bytecodes::_i2d:
1861     __ cvtsi2sdl(xmm0, rax);
1862     break;
1863   case Bytecodes::_i2b:
1864     __ movsbl(rax, rax);
1865     break;
1866   case Bytecodes::_i2c:
1867     __ movzwl(rax, rax);
1868     break;
1869   case Bytecodes::_i2s:
1870     __ movswl(rax, rax);
1871     break;
1872   case Bytecodes::_l2i:
1873     __ movl(rax, rax);
1874     break;
1875   case Bytecodes::_l2f:
1876     __ cvtsi2ssq(xmm0, rax);
1877     break;
1878   case Bytecodes::_l2d:
1879     __ cvtsi2sdq(xmm0, rax);
1880     break;
1881   case Bytecodes::_f2i:
1882   {
1883     Label L;
1884     __ cvttss2sil(rax, xmm0);
1885     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1886     __ jcc(Assembler::notEqual, L);
1887     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1888     __ bind(L);
1889   }
1890     break;
1891   case Bytecodes::_f2l:
1892   {
1893     Label L;
1894     __ cvttss2siq(rax, xmm0);
1895     // NaN or overflow/underflow?
1896     __ cmp64(rax, ExternalAddress((address) &is_nan), rscratch1);
1897     __ jcc(Assembler::notEqual, L);
1898     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1899     __ bind(L);
1900   }
1901     break;
1902   case Bytecodes::_f2d:
1903     __ cvtss2sd(xmm0, xmm0);
1904     break;
1905   case Bytecodes::_d2i:
1906   {
1907     Label L;
1908     __ cvttsd2sil(rax, xmm0);
1909     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1910     __ jcc(Assembler::notEqual, L);
1911     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1912     __ bind(L);
1913   }
1914     break;
1915   case Bytecodes::_d2l:
1916   {
1917     Label L;
1918     __ cvttsd2siq(rax, xmm0);
1919     // NaN or overflow/underflow?
1920     __ cmp64(rax, ExternalAddress((address) &is_nan), rscratch1);
1921     __ jcc(Assembler::notEqual, L);
1922     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1923     __ bind(L);
1924   }
1925     break;
1926   case Bytecodes::_d2f:
1927     __ cvtsd2ss(xmm0, xmm0);
1928     break;
1929   default:
1930     ShouldNotReachHere();
1931   }
1932 #else // !_LP64
1933   // Checking
1934 #ifdef ASSERT
1935   { TosState tos_in  = ilgl;
1936     TosState tos_out = ilgl;
1937     switch (bytecode()) {
1938       case Bytecodes::_i2l: // fall through
1939       case Bytecodes::_i2f: // fall through
1940       case Bytecodes::_i2d: // fall through
1941       case Bytecodes::_i2b: // fall through
1942       case Bytecodes::_i2c: // fall through
1943       case Bytecodes::_i2s: tos_in = itos; break;
1944       case Bytecodes::_l2i: // fall through
1945       case Bytecodes::_l2f: // fall through
1946       case Bytecodes::_l2d: tos_in = ltos; break;
1947       case Bytecodes::_f2i: // fall through
1948       case Bytecodes::_f2l: // fall through
1949       case Bytecodes::_f2d: tos_in = ftos; break;
1950       case Bytecodes::_d2i: // fall through
1951       case Bytecodes::_d2l: // fall through
1952       case Bytecodes::_d2f: tos_in = dtos; break;
1953       default             : ShouldNotReachHere();
1954     }
1955     switch (bytecode()) {
1956       case Bytecodes::_l2i: // fall through
1957       case Bytecodes::_f2i: // fall through
1958       case Bytecodes::_d2i: // fall through
1959       case Bytecodes::_i2b: // fall through
1960       case Bytecodes::_i2c: // fall through
1961       case Bytecodes::_i2s: tos_out = itos; break;
1962       case Bytecodes::_i2l: // fall through
1963       case Bytecodes::_f2l: // fall through
1964       case Bytecodes::_d2l: tos_out = ltos; break;
1965       case Bytecodes::_i2f: // fall through
1966       case Bytecodes::_l2f: // fall through
1967       case Bytecodes::_d2f: tos_out = ftos; break;
1968       case Bytecodes::_i2d: // fall through
1969       case Bytecodes::_l2d: // fall through
1970       case Bytecodes::_f2d: tos_out = dtos; break;
1971       default             : ShouldNotReachHere();
1972     }
1973     transition(tos_in, tos_out);
1974   }
1975 #endif // ASSERT
1976 
1977   // Conversion
1978   // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1979   switch (bytecode()) {
1980     case Bytecodes::_i2l:
1981       __ extend_sign(rdx, rax);
1982       break;
1983     case Bytecodes::_i2f:
1984       if (UseSSE >= 1) {
1985         __ cvtsi2ssl(xmm0, rax);
1986       } else {
1987         __ push(rax);          // store int on tos
1988         __ fild_s(at_rsp());   // load int to ST0
1989         __ f2ieee();           // truncate to float size
1990         __ pop(rcx);           // adjust rsp
1991       }
1992       break;
1993     case Bytecodes::_i2d:
1994       if (UseSSE >= 2) {
1995         __ cvtsi2sdl(xmm0, rax);
1996       } else {
1997       __ push(rax);          // add one slot for d2ieee()
1998       __ push(rax);          // store int on tos
1999       __ fild_s(at_rsp());   // load int to ST0
2000       __ d2ieee();           // truncate to double size
2001       __ pop(rcx);           // adjust rsp
2002       __ pop(rcx);
2003       }
2004       break;
2005     case Bytecodes::_i2b:
2006       __ shll(rax, 24);      // truncate upper 24 bits
2007       __ sarl(rax, 24);      // and sign-extend byte
2008       LP64_ONLY(__ movsbl(rax, rax));
2009       break;
2010     case Bytecodes::_i2c:
2011       __ andl(rax, 0xFFFF);  // truncate upper 16 bits
2012       LP64_ONLY(__ movzwl(rax, rax));
2013       break;
2014     case Bytecodes::_i2s:
2015       __ shll(rax, 16);      // truncate upper 16 bits
2016       __ sarl(rax, 16);      // and sign-extend short
2017       LP64_ONLY(__ movswl(rax, rax));
2018       break;
2019     case Bytecodes::_l2i:
2020       /* nothing to do */
2021       break;
2022     case Bytecodes::_l2f:
2023       // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
2024       // 64-bit long values to floats. On 32-bit platforms it is not possible
2025       // to use that instruction with 64-bit operands, therefore the FPU is
2026       // used to perform the conversion.
2027       __ push(rdx);          // store long on tos
2028       __ push(rax);
2029       __ fild_d(at_rsp());   // load long to ST0
2030       __ f2ieee();           // truncate to float size
2031       __ pop(rcx);           // adjust rsp
2032       __ pop(rcx);
2033       if (UseSSE >= 1) {
2034         __ push_f();
2035         __ pop_f(xmm0);
2036       }
2037       break;
2038     case Bytecodes::_l2d:
2039       // On 32-bit platforms the FPU is used for conversion because on
2040       // 32-bit platforms it is not not possible to use the cvtsi2sdq
2041       // instruction with 64-bit operands.
2042       __ push(rdx);          // store long on tos
2043       __ push(rax);
2044       __ fild_d(at_rsp());   // load long to ST0
2045       __ d2ieee();           // truncate to double size
2046       __ pop(rcx);           // adjust rsp
2047       __ pop(rcx);
2048       if (UseSSE >= 2) {
2049         __ push_d();
2050         __ pop_d(xmm0);
2051       }
2052       break;
2053     case Bytecodes::_f2i:
2054       // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
2055       // as it returns 0 for any NaN.
2056       if (UseSSE >= 1) {
2057         __ push_f(xmm0);
2058       } else {
2059         __ push(rcx);          // reserve space for argument
2060         __ fstp_s(at_rsp());   // pass float argument on stack
2061       }
2062       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
2063       break;
2064     case Bytecodes::_f2l:
2065       // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
2066       // as it returns 0 for any NaN.
2067       if (UseSSE >= 1) {
2068        __ push_f(xmm0);
2069       } else {
2070         __ push(rcx);          // reserve space for argument
2071         __ fstp_s(at_rsp());   // pass float argument on stack
2072       }
2073       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2074       break;
2075     case Bytecodes::_f2d:
2076       if (UseSSE < 1) {
2077         /* nothing to do */
2078       } else if (UseSSE == 1) {
2079         __ push_f(xmm0);
2080         __ pop_f();
2081       } else { // UseSSE >= 2
2082         __ cvtss2sd(xmm0, xmm0);
2083       }
2084       break;
2085     case Bytecodes::_d2i:
2086       if (UseSSE >= 2) {
2087         __ push_d(xmm0);
2088       } else {
2089         __ push(rcx);          // reserve space for argument
2090         __ push(rcx);
2091         __ fstp_d(at_rsp());   // pass double argument on stack
2092       }
2093       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
2094       break;
2095     case Bytecodes::_d2l:
2096       if (UseSSE >= 2) {
2097         __ push_d(xmm0);
2098       } else {
2099         __ push(rcx);          // reserve space for argument
2100         __ push(rcx);
2101         __ fstp_d(at_rsp());   // pass double argument on stack
2102       }
2103       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
2104       break;
2105     case Bytecodes::_d2f:
2106       if (UseSSE <= 1) {
2107         __ push(rcx);          // reserve space for f2ieee()
2108         __ f2ieee();           // truncate to float size
2109         __ pop(rcx);           // adjust rsp
2110         if (UseSSE == 1) {
2111           // The cvtsd2ss instruction is not available if UseSSE==1, therefore
2112           // the conversion is performed using the FPU in this case.
2113           __ push_f();
2114           __ pop_f(xmm0);
2115         }
2116       } else { // UseSSE >= 2
2117         __ cvtsd2ss(xmm0, xmm0);
2118       }
2119       break;
2120     default             :
2121       ShouldNotReachHere();
2122   }
2123 #endif // _LP64
2124 }
2125 
2126 void TemplateTable::lcmp() {
2127   transition(ltos, itos);
2128 #ifdef _LP64
2129   Label done;
2130   __ pop_l(rdx);
2131   __ cmpq(rdx, rax);
2132   __ movl(rax, -1);
2133   __ jccb(Assembler::less, done);
2134   __ setb(Assembler::notEqual, rax);
2135   __ movzbl(rax, rax);
2136   __ bind(done);
2137 #else
2138 
2139   // y = rdx:rax
2140   __ pop_l(rbx, rcx);             // get x = rcx:rbx
2141   __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
2142   __ mov(rax, rcx);
2143 #endif
2144 }
2145 
2146 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2147   if ((is_float && UseSSE >= 1) ||
2148       (!is_float && UseSSE >= 2)) {
2149     Label done;
2150     if (is_float) {
2151       // XXX get rid of pop here, use ... reg, mem32
2152       __ pop_f(xmm1);
2153       __ ucomiss(xmm1, xmm0);
2154     } else {
2155       // XXX get rid of pop here, use ... reg, mem64
2156       __ pop_d(xmm1);
2157       __ ucomisd(xmm1, xmm0);
2158     }
2159     if (unordered_result < 0) {
2160       __ movl(rax, -1);
2161       __ jccb(Assembler::parity, done);
2162       __ jccb(Assembler::below, done);
2163       __ setb(Assembler::notEqual, rdx);
2164       __ movzbl(rax, rdx);
2165     } else {
2166       __ movl(rax, 1);
2167       __ jccb(Assembler::parity, done);
2168       __ jccb(Assembler::above, done);
2169       __ movl(rax, 0);
2170       __ jccb(Assembler::equal, done);
2171       __ decrementl(rax);
2172     }
2173     __ bind(done);
2174   } else {
2175 #ifdef _LP64
2176     ShouldNotReachHere();
2177 #else // !_LP64
2178     if (is_float) {
2179       __ fld_s(at_rsp());
2180     } else {
2181       __ fld_d(at_rsp());
2182       __ pop(rdx);
2183     }
2184     __ pop(rcx);
2185     __ fcmp2int(rax, unordered_result < 0);
2186 #endif // _LP64
2187   }
2188 }
2189 
2190 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2191   __ get_method(rcx); // rcx holds method
2192   __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2193                                      // holds bumped taken count
2194 
2195   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2196                              InvocationCounter::counter_offset();
2197   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2198                               InvocationCounter::counter_offset();
2199 
2200   // Load up edx with the branch displacement
2201   if (is_wide) {
2202     __ movl(rdx, at_bcp(1));
2203   } else {
2204     __ load_signed_short(rdx, at_bcp(1));
2205   }
2206   __ bswapl(rdx);
2207 
2208   if (!is_wide) {
2209     __ sarl(rdx, 16);
2210   }
2211   LP64_ONLY(__ movl2ptr(rdx, rdx));
2212 
2213   // Handle all the JSR stuff here, then exit.
2214   // It's much shorter and cleaner than intermingling with the non-JSR
2215   // normal-branch stuff occurring below.
2216   if (is_jsr) {
2217     // Pre-load the next target bytecode into rbx
2218     __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2219 
2220     // compute return address as bci in rax
2221     __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2222                         in_bytes(ConstMethod::codes_offset())));
2223     __ subptr(rax, Address(rcx, Method::const_offset()));
2224     // Adjust the bcp in r13 by the displacement in rdx
2225     __ addptr(rbcp, rdx);
2226     // jsr returns atos that is not an oop
2227     __ push_i(rax);
2228     __ dispatch_only(vtos, true);
2229     return;
2230   }
2231 
2232   // Normal (non-jsr) branch handling
2233 
2234   // Adjust the bcp in r13 by the displacement in rdx
2235   __ addptr(rbcp, rdx);
2236 
2237   assert(UseLoopCounter || !UseOnStackReplacement,
2238          "on-stack-replacement requires loop counters");
2239   Label backedge_counter_overflow;
2240   Label dispatch;
2241   if (UseLoopCounter) {
2242     // increment backedge counter for backward branches
2243     // rax: MDO
2244     // rbx: MDO bumped taken-count
2245     // rcx: method
2246     // rdx: target offset
2247     // r13: target bcp
2248     // r14: locals pointer
2249     __ testl(rdx, rdx);             // check if forward or backward branch
2250     __ jcc(Assembler::positive, dispatch); // count only if backward branch
2251 
2252     // check if MethodCounters exists
2253     Label has_counters;
2254     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2255     __ testptr(rax, rax);
2256     __ jcc(Assembler::notZero, has_counters);
2257     __ push(rdx);
2258     __ push(rcx);
2259     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2260                rcx);
2261     __ pop(rcx);
2262     __ pop(rdx);
2263     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2264     __ testptr(rax, rax);
2265     __ jcc(Assembler::zero, dispatch);
2266     __ bind(has_counters);
2267 
2268     Label no_mdo;
2269     if (ProfileInterpreter) {
2270       // Are we profiling?
2271       __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2272       __ testptr(rbx, rbx);
2273       __ jccb(Assembler::zero, no_mdo);
2274       // Increment the MDO backedge counter
2275       const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2276           in_bytes(InvocationCounter::counter_offset()));
2277       const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2278       __ increment_mask_and_jump(mdo_backedge_counter, mask, rax,
2279           UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
2280       __ jmp(dispatch);
2281     }
2282     __ bind(no_mdo);
2283     // Increment backedge counter in MethodCounters*
2284     __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2285     const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2286     __ increment_mask_and_jump(Address(rcx, be_offset), mask, rax,
2287         UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
2288     __ bind(dispatch);
2289   }
2290 
2291   // Pre-load the next target bytecode into rbx
2292   __ load_unsigned_byte(rbx, Address(rbcp, 0));
2293 
2294   // continue with the bytecode @ target
2295   // rax: return bci for jsr's, unused otherwise
2296   // rbx: target bytecode
2297   // r13: target bcp
2298   __ dispatch_only(vtos, true);
2299 
2300   if (UseLoopCounter) {
2301     if (UseOnStackReplacement) {
2302       Label set_mdp;
2303       // invocation counter overflow
2304       __ bind(backedge_counter_overflow);
2305       __ negptr(rdx);
2306       __ addptr(rdx, rbcp); // branch bcp
2307       // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2308       __ call_VM(noreg,
2309                  CAST_FROM_FN_PTR(address,
2310                                   InterpreterRuntime::frequency_counter_overflow),
2311                  rdx);
2312 
2313       // rax: osr nmethod (osr ok) or null (osr not possible)
2314       // rdx: scratch
2315       // r14: locals pointer
2316       // r13: bcp
2317       __ testptr(rax, rax);                        // test result
2318       __ jcc(Assembler::zero, dispatch);         // no osr if null
2319       // nmethod may have been invalidated (VM may block upon call_VM return)
2320       __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2321       __ jcc(Assembler::notEqual, dispatch);
2322 
2323       // We have the address of an on stack replacement routine in rax.
2324       // In preparation of invoking it, first we must migrate the locals
2325       // and monitors from off the interpreter frame on the stack.
2326       // Ensure to save the osr nmethod over the migration call,
2327       // it will be preserved in rbx.
2328       __ mov(rbx, rax);
2329 
2330       NOT_LP64(__ get_thread(rcx));
2331 
2332       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2333 
2334       // rax is OSR buffer, move it to expected parameter location
2335       LP64_ONLY(__ mov(j_rarg0, rax));
2336       NOT_LP64(__ mov(rcx, rax));
2337       // We use j_rarg definitions here so that registers don't conflict as parameter
2338       // registers change across platforms as we are in the midst of a calling
2339       // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.
2340 
2341       const Register retaddr   = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2342       const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2343 
2344       // pop the interpreter frame
2345       __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2346       __ leave();                                // remove frame anchor
2347       __ pop(retaddr);                           // get return address
2348       __ mov(rsp, sender_sp);                   // set sp to sender sp
2349       // Ensure compiled code always sees stack at proper alignment
2350       __ andptr(rsp, -(StackAlignmentInBytes));
2351 
2352       // unlike x86 we need no specialized return from compiled code
2353       // to the interpreter or the call stub.
2354 
2355       // push the return address
2356       __ push(retaddr);
2357 
2358       // and begin the OSR nmethod
2359       __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
2360     }
2361   }
2362 }
2363 
2364 void TemplateTable::if_0cmp(Condition cc) {
2365   transition(itos, vtos);
2366   // assume branch is more often taken than not (loops use backward branches)
2367   Label not_taken;
2368   __ testl(rax, rax);
2369   __ jcc(j_not(cc), not_taken);
2370   branch(false, false);
2371   __ bind(not_taken);
2372   __ profile_not_taken_branch(rax);
2373 }
2374 
2375 void TemplateTable::if_icmp(Condition cc) {
2376   transition(itos, vtos);
2377   // assume branch is more often taken than not (loops use backward branches)
2378   Label not_taken;
2379   __ pop_i(rdx);
2380   __ cmpl(rdx, rax);
2381   __ jcc(j_not(cc), not_taken);
2382   branch(false, false);
2383   __ bind(not_taken);
2384   __ profile_not_taken_branch(rax);
2385 }
2386 
2387 void TemplateTable::if_nullcmp(Condition cc) {
2388   transition(atos, vtos);
2389   // assume branch is more often taken than not (loops use backward branches)
2390   Label not_taken;
2391   __ testptr(rax, rax);
2392   __ jcc(j_not(cc), not_taken);
2393   branch(false, false);
2394   __ bind(not_taken);
2395   __ profile_not_taken_branch(rax);
2396 }
2397 
2398 void TemplateTable::if_acmp(Condition cc) {
2399   transition(atos, vtos);
2400   // assume branch is more often taken than not (loops use backward branches)
2401   Label taken, not_taken;
2402   __ pop_ptr(rdx);
2403 
2404   __ profile_acmp(rbx, rdx, rax, rcx);
2405 
2406   const int is_inline_type_mask = markWord::inline_type_pattern;
2407   if (EnableValhalla) {
2408     __ cmpoop(rdx, rax);
2409     __ jcc(Assembler::equal, (cc == equal) ? taken : not_taken);
2410 
2411     // might be substitutable, test if either rax or rdx is null
2412     __ testptr(rax, rax);
2413     __ jcc(Assembler::zero, (cc == equal) ? not_taken : taken);
2414     __ testptr(rdx, rdx);
2415     __ jcc(Assembler::zero, (cc == equal) ? not_taken : taken);
2416 
2417     // and both are values ?
2418     __ movptr(rbx, Address(rdx, oopDesc::mark_offset_in_bytes()));
2419     __ andptr(rbx, Address(rax, oopDesc::mark_offset_in_bytes()));
2420     __ andptr(rbx, is_inline_type_mask);
2421     __ cmpptr(rbx, is_inline_type_mask);
2422     __ jcc(Assembler::notEqual, (cc == equal) ? not_taken : taken);
2423 
2424     // same value klass ?
2425     __ load_metadata(rbx, rdx);
2426     __ load_metadata(rcx, rax);
2427     __ cmpptr(rbx, rcx);
2428     __ jcc(Assembler::notEqual, (cc == equal) ? not_taken : taken);
2429 
2430     // Know both are the same type, let's test for substitutability...
2431     if (cc == equal) {
2432       invoke_is_substitutable(rax, rdx, taken, not_taken);
2433     } else {
2434       invoke_is_substitutable(rax, rdx, not_taken, taken);
2435     }
2436     __ stop("Not reachable");
2437   }
2438 
2439   __ cmpoop(rdx, rax);
2440   __ jcc(j_not(cc), not_taken);
2441   __ bind(taken);
2442   branch(false, false);
2443   __ bind(not_taken);
2444   __ profile_not_taken_branch(rax, true);
2445 }
2446 
2447 void TemplateTable::invoke_is_substitutable(Register aobj, Register bobj,
2448                                             Label& is_subst, Label& not_subst) {
2449   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::is_substitutable), aobj, bobj);
2450   // Restored...rax answer, jmp to outcome...
2451   __ testl(rax, rax);
2452   __ jcc(Assembler::zero, not_subst);
2453   __ jmp(is_subst);
2454 }
2455 
2456 void TemplateTable::ret() {
2457   transition(vtos, vtos);
2458   locals_index(rbx);
2459   LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2460   NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2461   __ profile_ret(rbx, rcx);
2462   __ get_method(rax);
2463   __ movptr(rbcp, Address(rax, Method::const_offset()));
2464   __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2465                       ConstMethod::codes_offset()));
2466   __ dispatch_next(vtos, 0, true);
2467 }
2468 
2469 void TemplateTable::wide_ret() {
2470   transition(vtos, vtos);
2471   locals_index_wide(rbx);
2472   __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2473   __ profile_ret(rbx, rcx);
2474   __ get_method(rax);
2475   __ movptr(rbcp, Address(rax, Method::const_offset()));
2476   __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2477   __ dispatch_next(vtos, 0, true);
2478 }
2479 
2480 void TemplateTable::tableswitch() {
2481   Label default_case, continue_execution;
2482   transition(itos, vtos);
2483 
2484   // align r13/rsi
2485   __ lea(rbx, at_bcp(BytesPerInt));
2486   __ andptr(rbx, -BytesPerInt);
2487   // load lo & hi
2488   __ movl(rcx, Address(rbx, BytesPerInt));
2489   __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2490   __ bswapl(rcx);
2491   __ bswapl(rdx);
2492   // check against lo & hi
2493   __ cmpl(rax, rcx);
2494   __ jcc(Assembler::less, default_case);
2495   __ cmpl(rax, rdx);
2496   __ jcc(Assembler::greater, default_case);
2497   // lookup dispatch offset
2498   __ subl(rax, rcx);
2499   __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2500   __ profile_switch_case(rax, rbx, rcx);
2501   // continue execution
2502   __ bind(continue_execution);
2503   __ bswapl(rdx);
2504   LP64_ONLY(__ movl2ptr(rdx, rdx));
2505   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2506   __ addptr(rbcp, rdx);
2507   __ dispatch_only(vtos, true);
2508   // handle default
2509   __ bind(default_case);
2510   __ profile_switch_default(rax);
2511   __ movl(rdx, Address(rbx, 0));
2512   __ jmp(continue_execution);
2513 }
2514 
2515 void TemplateTable::lookupswitch() {
2516   transition(itos, itos);
2517   __ stop("lookupswitch bytecode should have been rewritten");
2518 }
2519 
2520 void TemplateTable::fast_linearswitch() {
2521   transition(itos, vtos);
2522   Label loop_entry, loop, found, continue_execution;
2523   // bswap rax so we can avoid bswapping the table entries
2524   __ bswapl(rax);
2525   // align r13
2526   __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2527                                     // this instruction (change offsets
2528                                     // below)
2529   __ andptr(rbx, -BytesPerInt);
2530   // set counter
2531   __ movl(rcx, Address(rbx, BytesPerInt));
2532   __ bswapl(rcx);
2533   __ jmpb(loop_entry);
2534   // table search
2535   __ bind(loop);
2536   __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2537   __ jcc(Assembler::equal, found);
2538   __ bind(loop_entry);
2539   __ decrementl(rcx);
2540   __ jcc(Assembler::greaterEqual, loop);
2541   // default case
2542   __ profile_switch_default(rax);
2543   __ movl(rdx, Address(rbx, 0));
2544   __ jmp(continue_execution);
2545   // entry found -> get offset
2546   __ bind(found);
2547   __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2548   __ profile_switch_case(rcx, rax, rbx);
2549   // continue execution
2550   __ bind(continue_execution);
2551   __ bswapl(rdx);
2552   __ movl2ptr(rdx, rdx);
2553   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2554   __ addptr(rbcp, rdx);
2555   __ dispatch_only(vtos, true);
2556 }
2557 
2558 void TemplateTable::fast_binaryswitch() {
2559   transition(itos, vtos);
2560   // Implementation using the following core algorithm:
2561   //
2562   // int binary_search(int key, LookupswitchPair* array, int n) {
2563   //   // Binary search according to "Methodik des Programmierens" by
2564   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2565   //   int i = 0;
2566   //   int j = n;
2567   //   while (i+1 < j) {
2568   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2569   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2570   //     // where a stands for the array and assuming that the (inexisting)
2571   //     // element a[n] is infinitely big.
2572   //     int h = (i + j) >> 1;
2573   //     // i < h < j
2574   //     if (key < array[h].fast_match()) {
2575   //       j = h;
2576   //     } else {
2577   //       i = h;
2578   //     }
2579   //   }
2580   //   // R: a[i] <= key < a[i+1] or Q
2581   //   // (i.e., if key is within array, i is the correct index)
2582   //   return i;
2583   // }
2584 
2585   // Register allocation
2586   const Register key   = rax; // already set (tosca)
2587   const Register array = rbx;
2588   const Register i     = rcx;
2589   const Register j     = rdx;
2590   const Register h     = rdi;
2591   const Register temp  = rsi;
2592 
2593   // Find array start
2594   NOT_LP64(__ save_bcp());
2595 
2596   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2597                                           // get rid of this
2598                                           // instruction (change
2599                                           // offsets below)
2600   __ andptr(array, -BytesPerInt);
2601 
2602   // Initialize i & j
2603   __ xorl(i, i);                            // i = 0;
2604   __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2605 
2606   // Convert j into native byteordering
2607   __ bswapl(j);
2608 
2609   // And start
2610   Label entry;
2611   __ jmp(entry);
2612 
2613   // binary search loop
2614   {
2615     Label loop;
2616     __ bind(loop);
2617     // int h = (i + j) >> 1;
2618     __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2619     __ sarl(h, 1);                               // h = (i + j) >> 1;
2620     // if (key < array[h].fast_match()) {
2621     //   j = h;
2622     // } else {
2623     //   i = h;
2624     // }
2625     // Convert array[h].match to native byte-ordering before compare
2626     __ movl(temp, Address(array, h, Address::times_8));
2627     __ bswapl(temp);
2628     __ cmpl(key, temp);
2629     // j = h if (key <  array[h].fast_match())
2630     __ cmov32(Assembler::less, j, h);
2631     // i = h if (key >= array[h].fast_match())
2632     __ cmov32(Assembler::greaterEqual, i, h);
2633     // while (i+1 < j)
2634     __ bind(entry);
2635     __ leal(h, Address(i, 1)); // i+1
2636     __ cmpl(h, j);             // i+1 < j
2637     __ jcc(Assembler::less, loop);
2638   }
2639 
2640   // end of binary search, result index is i (must check again!)
2641   Label default_case;
2642   // Convert array[i].match to native byte-ordering before compare
2643   __ movl(temp, Address(array, i, Address::times_8));
2644   __ bswapl(temp);
2645   __ cmpl(key, temp);
2646   __ jcc(Assembler::notEqual, default_case);
2647 
2648   // entry found -> j = offset
2649   __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2650   __ profile_switch_case(i, key, array);
2651   __ bswapl(j);
2652   LP64_ONLY(__ movslq(j, j));
2653 
2654   NOT_LP64(__ restore_bcp());
2655   NOT_LP64(__ restore_locals());                           // restore rdi
2656 
2657   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2658   __ addptr(rbcp, j);
2659   __ dispatch_only(vtos, true);
2660 
2661   // default case -> j = default offset
2662   __ bind(default_case);
2663   __ profile_switch_default(i);
2664   __ movl(j, Address(array, -2 * BytesPerInt));
2665   __ bswapl(j);
2666   LP64_ONLY(__ movslq(j, j));
2667 
2668   NOT_LP64(__ restore_bcp());
2669   NOT_LP64(__ restore_locals());
2670 
2671   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2672   __ addptr(rbcp, j);
2673   __ dispatch_only(vtos, true);
2674 }
2675 
2676 void TemplateTable::_return(TosState state) {
2677   transition(state, state);
2678 
2679   assert(_desc->calls_vm(),
2680          "inconsistent calls_vm information"); // call in remove_activation
2681 
2682   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2683     assert(state == vtos, "only valid state");
2684     Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2685     __ movptr(robj, aaddress(0));
2686     __ load_klass(rdi, robj, rscratch1);
2687     __ testb(Address(rdi, Klass::misc_flags_offset()), KlassFlags::_misc_has_finalizer);
2688     Label skip_register_finalizer;
2689     __ jcc(Assembler::zero, skip_register_finalizer);
2690 
2691     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2692 
2693     __ bind(skip_register_finalizer);
2694   }
2695 
2696   if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
2697     Label no_safepoint;
2698     NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
2699 #ifdef _LP64
2700     __ testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2701 #else
2702     const Register thread = rdi;
2703     __ get_thread(thread);
2704     __ testb(Address(thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2705 #endif
2706     __ jcc(Assembler::zero, no_safepoint);
2707     __ push(state);
2708     __ push_cont_fastpath();
2709     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2710                                        InterpreterRuntime::at_safepoint));
2711     __ pop_cont_fastpath();
2712     __ pop(state);
2713     __ bind(no_safepoint);
2714   }
2715 
2716   // Narrow result if state is itos but result type is smaller.
2717   // Need to narrow in the return bytecode rather than in generate_return_entry
2718   // since compiled code callers expect the result to already be narrowed.
2719   if (state == itos) {
2720     __ narrow(rax);
2721   }
2722 
2723   __ remove_activation(state, rbcp, true, true, true);
2724 
2725   __ jmp(rbcp);
2726 }
2727 
2728 // ----------------------------------------------------------------------------
2729 // Volatile variables demand their effects be made known to all CPU's
2730 // in order.  Store buffers on most chips allow reads & writes to
2731 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2732 // without some kind of memory barrier (i.e., it's not sufficient that
2733 // the interpreter does not reorder volatile references, the hardware
2734 // also must not reorder them).
2735 //
2736 // According to the new Java Memory Model (JMM):
2737 // (1) All volatiles are serialized wrt to each other.  ALSO reads &
2738 //     writes act as acquire & release, so:
2739 // (2) A read cannot let unrelated NON-volatile memory refs that
2740 //     happen after the read float up to before the read.  It's OK for
2741 //     non-volatile memory refs that happen before the volatile read to
2742 //     float down below it.
2743 // (3) Similar a volatile write cannot let unrelated NON-volatile
2744 //     memory refs that happen BEFORE the write float down to after the
2745 //     write.  It's OK for non-volatile memory refs that happen after the
2746 //     volatile write to float up before it.
2747 //
2748 // We only put in barriers around volatile refs (they are expensive),
2749 // not _between_ memory refs (that would require us to track the
2750 // flavor of the previous memory refs).  Requirements (2) and (3)
2751 // require some barriers before volatile stores and after volatile
2752 // loads.  These nearly cover requirement (1) but miss the
2753 // volatile-store-volatile-load case.  This final case is placed after
2754 // volatile-stores although it could just as well go before
2755 // volatile-loads.
2756 
2757 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2758   // Helper function to insert a is-volatile test and memory barrier
2759   __ membar(order_constraint);
2760 }
2761 
2762 void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
2763                                                        Register cache,
2764                                                        Register index) {
2765   const Register temp = rbx;
2766   assert_different_registers(cache, index, temp);
2767 
2768   Label L_clinit_barrier_slow;
2769   Label resolved;
2770 
2771   Bytecodes::Code code = bytecode();
2772 
2773   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2774 
2775   __ load_method_entry(cache, index);
2776   switch(byte_no) {
2777     case f1_byte:
2778       __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
2779       break;
2780     case f2_byte:
2781       __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
2782       break;
2783     default:
2784       ShouldNotReachHere();
2785   }
2786   __ cmpl(temp, code);  // have we resolved this bytecode?
2787   __ jcc(Assembler::equal, resolved);
2788 
2789   // resolve first time through
2790   // Class initialization barrier slow path lands here as well.
2791   __ bind(L_clinit_barrier_slow);
2792   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2793   __ movl(temp, code);
2794   __ call_VM(noreg, entry, temp);
2795   // Update registers with resolved info
2796   __ load_method_entry(cache, index);
2797 
2798   __ bind(resolved);
2799 
2800   // Class initialization barrier for static methods
2801   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2802     const Register method = temp;
2803     const Register klass  = temp;
2804     const Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2805     assert(thread != noreg, "x86_32 not supported");
2806 
2807     __ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2808     __ load_method_holder(klass, method);
2809     __ clinit_barrier(klass, thread, nullptr /*L_fast_path*/, &L_clinit_barrier_slow);
2810   }
2811 }
2812 
2813 void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
2814                                             Register cache,
2815                                             Register index) {
2816   const Register temp = rbx;
2817   assert_different_registers(cache, index, temp);
2818 
2819   Label resolved;
2820 
2821   Bytecodes::Code code = bytecode();
2822   switch (code) {
2823     case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2824     case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2825     default: break;
2826   }
2827 
2828   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2829   __ load_field_entry(cache, index);
2830   if (byte_no == f1_byte) {
2831     __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedFieldEntry::get_code_offset())));
2832   } else {
2833     __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedFieldEntry::put_code_offset())));
2834   }
2835   __ cmpl(temp, code);  // have we resolved this bytecode?
2836   __ jcc(Assembler::equal, resolved);
2837 
2838   // resolve first time through
2839   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2840   __ movl(temp, code);
2841   __ call_VM(noreg, entry, temp);
2842   // Update registers with resolved info
2843   __ load_field_entry(cache, index);
2844 
2845   __ bind(resolved);
2846 }
2847 
2848 void TemplateTable::load_resolved_field_entry(Register obj,
2849                                               Register cache,
2850                                               Register tos_state,
2851                                               Register offset,
2852                                               Register flags,
2853                                               bool is_static = false) {
2854   assert_different_registers(cache, tos_state, flags, offset);
2855 
2856   // Field offset
2857   __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
2858 
2859   // Flags
2860   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())));
2861 
2862   // TOS state
2863   __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())));
2864 
2865   // Klass overwrite register
2866   if (is_static) {
2867     __ movptr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()));
2868     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2869     __ movptr(obj, Address(obj, mirror_offset));
2870     __ resolve_oop_handle(obj, rscratch2);
2871   }
2872 
2873 }
2874 
2875 void TemplateTable::load_invokedynamic_entry(Register method) {
2876   // setup registers
2877   const Register appendix = rax;
2878   const Register cache = rcx;
2879   const Register index = rdx;
2880   assert_different_registers(method, appendix, cache, index);
2881 
2882   __ save_bcp();
2883 
2884   Label resolved;
2885 
2886   __ load_resolved_indy_entry(cache, index);
2887   __ movptr(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2888 
2889   // Compare the method to zero
2890   __ testptr(method, method);
2891   __ jcc(Assembler::notZero, resolved);
2892 
2893   Bytecodes::Code code = bytecode();
2894 
2895   // Call to the interpreter runtime to resolve invokedynamic
2896   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2897   __ movl(method, code); // this is essentially Bytecodes::_invokedynamic
2898   __ call_VM(noreg, entry, method);
2899   // Update registers with resolved info
2900   __ load_resolved_indy_entry(cache, index);
2901   __ movptr(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2902 
2903 #ifdef ASSERT
2904   __ testptr(method, method);
2905   __ jcc(Assembler::notZero, resolved);
2906   __ stop("Should be resolved by now");
2907 #endif // ASSERT
2908   __ bind(resolved);
2909 
2910   Label L_no_push;
2911   // Check if there is an appendix
2912   __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
2913   __ testl(index, (1 << ResolvedIndyEntry::has_appendix_shift));
2914   __ jcc(Assembler::zero, L_no_push);
2915 
2916   // Get appendix
2917   __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
2918   // Push the appendix as a trailing parameter
2919   // since the parameter_size includes it.
2920   __ load_resolved_reference_at_index(appendix, index);
2921   __ verify_oop(appendix);
2922   __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
2923   __ bind(L_no_push);
2924 
2925   // compute return type
2926   __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
2927   // load return address
2928   {
2929     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2930     ExternalAddress table(table_addr);
2931 #ifdef _LP64
2932     __ lea(rscratch1, table);
2933     __ movptr(index, Address(rscratch1, index, Address::times_ptr));
2934 #else
2935     __ movptr(index, ArrayAddress(table, Address(noreg, index, Address::times_ptr)));
2936 #endif // _LP64
2937   }
2938 
2939   // push return address
2940   __ push(index);
2941 }
2942 
2943 void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
2944                                                                  Register method,
2945                                                                  Register flags) {
2946   // setup registers
2947   const Register index = rdx;
2948   assert_different_registers(cache, index);
2949   assert_different_registers(method, cache, flags);
2950 
2951   // determine constant pool cache field offsets
2952   resolve_cache_and_index_for_method(f1_byte, cache, index);
2953   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2954   __ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2955 }
2956 
2957 void TemplateTable::load_resolved_method_entry_handle(Register cache,
2958                                                Register method,
2959                                                Register ref_index,
2960                                                Register flags) {
2961   // setup registers
2962   const Register index = rdx;
2963   assert_different_registers(cache, index);
2964   assert_different_registers(cache, method, ref_index, flags);
2965 
2966   // determine constant pool cache field offsets
2967   resolve_cache_and_index_for_method(f1_byte, cache, index);
2968   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2969 
2970   // Maybe push appendix
2971   Label L_no_push;
2972   __ testl(flags, (1 << ResolvedMethodEntry::has_appendix_shift));
2973   __ jcc(Assembler::zero, L_no_push);
2974   // invokehandle uses an index into the resolved references array
2975   __ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
2976   // Push the appendix as a trailing parameter.
2977   // This must be done before we get the receiver,
2978   // since the parameter_size includes it.
2979   Register appendix = method;
2980   __ load_resolved_reference_at_index(appendix, ref_index);
2981   __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
2982   __ bind(L_no_push);
2983 
2984   __ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2985 }
2986 
2987 void TemplateTable::load_resolved_method_entry_interface(Register cache,
2988                                                          Register klass,
2989                                                          Register method_or_table_index,
2990                                                          Register flags) {
2991   // setup registers
2992   const Register index = rdx;
2993   assert_different_registers(cache, klass, method_or_table_index, flags);
2994 
2995   // determine constant pool cache field offsets
2996   resolve_cache_and_index_for_method(f1_byte, cache, index);
2997   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2998 
2999   // Invokeinterface can behave in different ways:
3000   // If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
3001   // behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
3002   // vtable index is placed in the register.
3003   // Otherwise, the registers will be populated with the klass and method.
3004 
3005   Label NotVirtual; Label NotVFinal; Label Done;
3006   __ testl(flags, 1 << ResolvedMethodEntry::is_forced_virtual_shift);
3007   __ jcc(Assembler::zero, NotVirtual);
3008   __ testl(flags, (1 << ResolvedMethodEntry::is_vfinal_shift));
3009   __ jcc(Assembler::zero, NotVFinal);
3010   __ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
3011   __ jmp(Done);
3012 
3013   __ bind(NotVFinal);
3014   __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
3015   __ jmp(Done);
3016 
3017   __ bind(NotVirtual);
3018   __ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
3019   __ movptr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
3020   __ bind(Done);
3021 }
3022 
3023 void TemplateTable::load_resolved_method_entry_virtual(Register cache,
3024                                                        Register method_or_table_index,
3025                                                        Register flags) {
3026   // setup registers
3027   const Register index = rdx;
3028   assert_different_registers(index, cache);
3029   assert_different_registers(method_or_table_index, cache, flags);
3030 
3031   // determine constant pool cache field offsets
3032   resolve_cache_and_index_for_method(f2_byte, cache, index);
3033   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
3034 
3035   // method_or_table_index can either be an itable index or a method depending on the virtual final flag
3036   Label isVFinal; Label Done;
3037   __ testl(flags, (1 << ResolvedMethodEntry::is_vfinal_shift));
3038   __ jcc(Assembler::notZero, isVFinal);
3039   __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
3040   __ jmp(Done);
3041   __ bind(isVFinal);
3042   __ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
3043   __ bind(Done);
3044 }
3045 
3046 // The registers cache and index expected to be set before call.
3047 // Correct values of the cache and index registers are preserved.
3048 void TemplateTable::jvmti_post_field_access(Register cache,
3049                                             Register index,
3050                                             bool is_static,
3051                                             bool has_tos) {
3052   if (JvmtiExport::can_post_field_access()) {
3053     // Check to see if a field access watch has been set before we take
3054     // the time to call into the VM.
3055     Label L1;
3056     assert_different_registers(cache, index, rax);
3057     __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3058     __ testl(rax,rax);
3059     __ jcc(Assembler::zero, L1);
3060 
3061     // cache entry pointer
3062     __ load_field_entry(cache, index);
3063     if (is_static) {
3064       __ xorptr(rax, rax);      // null object reference
3065     } else {
3066       __ pop(atos);         // Get the object
3067       __ verify_oop(rax);
3068       __ push(atos);        // Restore stack state
3069     }
3070     // rax,:   object pointer or null
3071     // cache: cache entry pointer
3072     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
3073               rax, cache);
3074 
3075     __ load_field_entry(cache, index);
3076     __ bind(L1);
3077   }
3078 }
3079 
3080 void TemplateTable::pop_and_check_object(Register r) {
3081   __ pop_ptr(r);
3082   __ null_check(r);  // for field access must check obj.
3083   __ verify_oop(r);
3084 }
3085 
3086 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3087   transition(vtos, vtos);
3088 
3089   const Register obj   = LP64_ONLY(r9) NOT_LP64(rcx);
3090   const Register cache = rcx;
3091   const Register index = rdx;
3092   const Register off   = rbx;
3093   const Register tos_state   = rax;
3094   const Register flags = rdx;
3095   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
3096 
3097   resolve_cache_and_index_for_field(byte_no, cache, index);
3098   jvmti_post_field_access(cache, index, is_static, false);
3099   load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
3100 
3101   const Address field(obj, off, Address::times_1, 0*wordSize);
3102 
3103   Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notInlineType;
3104 
3105   // Make sure we don't need to mask edx after the above shift
3106   assert(btos == 0, "change code, btos != 0");
3107   __ testl(tos_state, tos_state);
3108   __ jcc(Assembler::notZero, notByte);
3109 
3110   // btos
3111   if (!is_static) pop_and_check_object(obj);
3112   __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3113   __ push(btos);
3114   // Rewrite bytecode to be faster
3115   if (!is_static && rc == may_rewrite) {
3116     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
3117   }
3118   __ jmp(Done);
3119 
3120   __ bind(notByte);
3121   __ cmpl(tos_state, ztos);
3122   __ jcc(Assembler::notEqual, notBool);
3123    if (!is_static) pop_and_check_object(obj);
3124   // ztos (same code as btos)
3125   __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
3126   __ push(ztos);
3127   // Rewrite bytecode to be faster
3128   if (!is_static && rc == may_rewrite) {
3129     // use btos rewriting, no truncating to t/f bit is needed for getfield.
3130     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
3131   }
3132   __ jmp(Done);
3133 
3134   __ bind(notBool);
3135   __ cmpl(tos_state, atos);
3136   __ jcc(Assembler::notEqual, notObj);
3137   // atos
3138   if (!EnableValhalla) {
3139     if (!is_static) pop_and_check_object(obj);
3140     do_oop_load(_masm, field, rax);
3141     __ push(atos);
3142     if (!is_static && rc == may_rewrite) {
3143       patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
3144     }
3145     __ jmp(Done);
3146   } else {
3147     if (is_static) {
3148       __ load_heap_oop(rax, field);
3149       Label is_null_free_inline_type, uninitialized;
3150       // Issue below if the static field has not been initialized yet
3151       __ test_field_is_null_free_inline_type(flags, rscratch1, is_null_free_inline_type);
3152         // field is not a null free inline type
3153         __ push(atos);
3154         __ jmp(Done);
3155       // field is a null free inline type, must not return null even if uninitialized
3156       __ bind(is_null_free_inline_type);
3157           __ testptr(rax, rax);
3158         __ jcc(Assembler::zero, uninitialized);
3159           __ push(atos);
3160           __ jmp(Done);
3161         __ bind(uninitialized);
3162 #ifdef _LP64
3163           Label slow_case, finish;
3164           __ movptr(rbx, Address(obj, java_lang_Class::klass_offset()));
3165           __ cmpb(Address(rbx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3166           __ jcc(Assembler::notEqual, slow_case);
3167         __ get_default_value_oop(rbx, rscratch1, rax);
3168         __ jmp(finish);
3169         __ bind(slow_case);
3170 #endif // LP64
3171           __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_inline_type_field),
3172                 obj, cache);
3173 #ifdef _LP64
3174           __ bind(finish);
3175   #endif // _LP64
3176         __ verify_oop(rax);
3177         __ push(atos);
3178         __ jmp(Done);
3179     } else {
3180       Label is_flat, nonnull, is_inline_type, rewrite_inline, has_null_marker;
3181       __ test_field_is_null_free_inline_type(flags, rscratch1, is_inline_type);
3182       __ test_field_has_null_marker(flags, rscratch1, has_null_marker);
3183       // field is not a null free inline type
3184       pop_and_check_object(obj);
3185       __ load_heap_oop(rax, field);
3186       __ push(atos);
3187       if (rc == may_rewrite) {
3188         patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
3189       }
3190       __ jmp(Done);
3191       __ bind(is_inline_type);
3192       __ test_field_is_flat(flags, rscratch1, is_flat);
3193           // field is not flat
3194           pop_and_check_object(obj);
3195           __ load_heap_oop(rax, field);
3196           __ testptr(rax, rax);
3197           __ jcc(Assembler::notZero, nonnull);
3198             __ load_unsigned_short(flags, Address(cache, in_bytes(ResolvedFieldEntry::field_index_offset())));
3199             __ movptr(rcx, Address(cache, ResolvedFieldEntry::field_holder_offset()));
3200             __ get_inline_type_field_klass(rcx, flags, rbx);
3201             __ get_default_value_oop(rbx, rcx, rax);
3202           __ bind(nonnull);
3203           __ verify_oop(rax);
3204           __ push(atos);
3205           __ jmp(rewrite_inline);
3206         __ bind(is_flat);
3207           pop_and_check_object(rax);
3208           __ read_flat_field(rcx, rdx, rbx, rax);
3209           __ verify_oop(rax);
3210           __ push(atos);
3211           __ jmp(rewrite_inline);
3212       __ bind(has_null_marker);
3213         pop_and_check_object(rax);
3214         __ load_field_entry(rcx, rbx);
3215         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_nullable_flat_field), rax, rcx);
3216         __ get_vm_result(rax, r15_thread);
3217         __ push(atos);
3218       __ bind(rewrite_inline);
3219       if (rc == may_rewrite) {
3220         patch_bytecode(Bytecodes::_fast_vgetfield, bc, rbx);
3221       }
3222         __ jmp(Done);
3223     }
3224   }
3225 
3226   __ bind(notObj);
3227 
3228   if (!is_static) pop_and_check_object(obj);
3229 
3230   __ cmpl(tos_state, itos);
3231   __ jcc(Assembler::notEqual, notInt);
3232   // itos
3233   __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3234   __ push(itos);
3235   // Rewrite bytecode to be faster
3236   if (!is_static && rc == may_rewrite) {
3237     patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
3238   }
3239   __ jmp(Done);
3240 
3241   __ bind(notInt);
3242   __ cmpl(tos_state, ctos);
3243   __ jcc(Assembler::notEqual, notChar);
3244   // ctos
3245   __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3246   __ push(ctos);
3247   // Rewrite bytecode to be faster
3248   if (!is_static && rc == may_rewrite) {
3249     patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
3250   }
3251   __ jmp(Done);
3252 
3253   __ bind(notChar);
3254   __ cmpl(tos_state, stos);
3255   __ jcc(Assembler::notEqual, notShort);
3256   // stos
3257   __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3258   __ push(stos);
3259   // Rewrite bytecode to be faster
3260   if (!is_static && rc == may_rewrite) {
3261     patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
3262   }
3263   __ jmp(Done);
3264 
3265   __ bind(notShort);
3266   __ cmpl(tos_state, ltos);
3267   __ jcc(Assembler::notEqual, notLong);
3268   // ltos
3269     // Generate code as if volatile (x86_32).  There just aren't enough registers to
3270     // save that information and this code is faster than the test.
3271   __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
3272   __ push(ltos);
3273   // Rewrite bytecode to be faster
3274   LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
3275   __ jmp(Done);
3276 
3277   __ bind(notLong);
3278   __ cmpl(tos_state, ftos);
3279   __ jcc(Assembler::notEqual, notFloat);
3280   // ftos
3281 
3282   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3283   __ push(ftos);
3284   // Rewrite bytecode to be faster
3285   if (!is_static && rc == may_rewrite) {
3286     patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
3287   }
3288   __ jmp(Done);
3289 
3290   __ bind(notFloat);
3291 #ifdef ASSERT
3292   Label notDouble;
3293   __ cmpl(tos_state, dtos);
3294   __ jcc(Assembler::notEqual, notDouble);
3295 #endif
3296   // dtos
3297   // MO_RELAXED: for the case of volatile field, in fact it adds no extra work for the underlying implementation
3298   __ access_load_at(T_DOUBLE, IN_HEAP | MO_RELAXED, noreg /* dtos */, field, noreg, noreg);
3299   __ push(dtos);
3300   // Rewrite bytecode to be faster
3301   if (!is_static && rc == may_rewrite) {
3302     patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
3303   }
3304 #ifdef ASSERT
3305   __ jmp(Done);
3306 
3307   __ bind(notDouble);
3308   __ stop("Bad state");
3309 #endif
3310 
3311   __ bind(Done);
3312   // [jk] not needed currently
3313   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
3314   //                                              Assembler::LoadStore));
3315 }
3316 
3317 void TemplateTable::getfield(int byte_no) {
3318   getfield_or_static(byte_no, false);
3319 }
3320 
3321 void TemplateTable::nofast_getfield(int byte_no) {
3322   getfield_or_static(byte_no, false, may_not_rewrite);
3323 }
3324 
3325 void TemplateTable::getstatic(int byte_no) {
3326   getfield_or_static(byte_no, true);
3327 }
3328 
3329 // The registers cache and index expected to be set before call.
3330 // The function may destroy various registers, just not the cache and index registers.
3331 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
3332   // Cache is rcx and index is rdx
3333   const Register entry = LP64_ONLY(c_rarg2) NOT_LP64(rax); // ResolvedFieldEntry
3334   const Register obj = LP64_ONLY(c_rarg1) NOT_LP64(rbx);   // Object pointer
3335   const Register value = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // JValue object
3336 
3337   if (JvmtiExport::can_post_field_modification()) {
3338     // Check to see if a field modification watch has been set before
3339     // we take the time to call into the VM.
3340     Label L1;
3341     assert_different_registers(cache, obj, rax);
3342     __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3343     __ testl(rax, rax);
3344     __ jcc(Assembler::zero, L1);
3345 
3346     __ mov(entry, cache);
3347 
3348     if (is_static) {
3349       // Life is simple.  Null out the object pointer.
3350       __ xorl(obj, obj);
3351 
3352     } else {
3353       // Life is harder. The stack holds the value on top, followed by
3354       // the object.  We don't know the size of the value, though; it
3355       // could be one or two words depending on its type. As a result,
3356       // we must find the type to determine where the object is.
3357 #ifndef _LP64
3358       Label two_word, valsize_known;
3359 #endif
3360       __ load_unsigned_byte(value, Address(entry, in_bytes(ResolvedFieldEntry::type_offset())));
3361 #ifdef _LP64
3362       __ movptr(obj, at_tos_p1());  // initially assume a one word jvalue
3363       __ cmpl(value, ltos);
3364       __ cmovptr(Assembler::equal,
3365                  obj, at_tos_p2()); // ltos (two word jvalue)
3366       __ cmpl(value, dtos);
3367       __ cmovptr(Assembler::equal,
3368                  obj, at_tos_p2()); // dtos (two word jvalue)
3369 #else
3370       __ mov(obj, rsp);
3371       __ cmpl(value, ltos);
3372       __ jccb(Assembler::equal, two_word);
3373       __ cmpl(value, dtos);
3374       __ jccb(Assembler::equal, two_word);
3375       __ addptr(obj, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
3376       __ jmpb(valsize_known);
3377 
3378       __ bind(two_word);
3379       __ addptr(obj, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
3380 
3381       __ bind(valsize_known);
3382       // setup object pointer
3383       __ movptr(obj, Address(obj, 0));
3384 #endif
3385     }
3386 
3387     // object (tos)
3388     __ mov(value, rsp);
3389     // obj: object pointer set up above (null if static)
3390     // cache: field entry pointer
3391     // value: jvalue object on the stack
3392     __ call_VM(noreg,
3393               CAST_FROM_FN_PTR(address,
3394                               InterpreterRuntime::post_field_modification),
3395               obj, entry, value);
3396     // Reload field entry
3397     __ load_field_entry(cache, index);
3398     __ bind(L1);
3399   }
3400 }
3401 
3402 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3403   transition(vtos, vtos);
3404 
3405   const Register obj = rcx;
3406   const Register cache = rcx;
3407   const Register index = rdx;
3408   const Register tos_state   = rdx;
3409   const Register off   = rbx;
3410   const Register flags = r9;
3411 
3412   resolve_cache_and_index_for_field(byte_no, cache, index);
3413   jvmti_post_field_mod(cache, index, is_static);
3414   load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
3415 
3416   // [jk] not needed currently
3417   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3418   //                                              Assembler::StoreStore));
3419 
3420   Label notVolatile, Done;
3421 
3422   // Check for volatile store
3423   __ movl(rscratch1, flags);
3424   __ andl(rscratch1, (1 << ResolvedFieldEntry::is_volatile_shift));
3425   __ testl(rscratch1, rscratch1);
3426   __ jcc(Assembler::zero, notVolatile);
3427 
3428   putfield_or_static_helper(byte_no, is_static, rc, obj, off, tos_state, flags);
3429   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3430                                                Assembler::StoreStore));
3431   __ jmp(Done);
3432   __ bind(notVolatile);
3433 
3434   putfield_or_static_helper(byte_no, is_static, rc, obj, off, tos_state, flags);
3435 
3436   __ bind(Done);
3437 }
3438 
3439 void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc,
3440                                               Register obj, Register off, Register tos_state, Register flags) {
3441 
3442   // field addresses
3443   const Address field(obj, off, Address::times_1, 0*wordSize);
3444   NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3445 
3446   Label notByte, notBool, notInt, notShort, notChar,
3447         notLong, notFloat, notObj, notInlineType;
3448   Label Done;
3449 
3450   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3451 
3452   // Test TOS state
3453   __ testl(tos_state, tos_state);
3454   __ jcc(Assembler::notZero, notByte);
3455 
3456   // btos
3457   {
3458     __ pop(btos);
3459     if (!is_static) pop_and_check_object(obj);
3460     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg, noreg);
3461     if (!is_static && rc == may_rewrite) {
3462       patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3463     }
3464     __ jmp(Done);
3465   }
3466 
3467   __ bind(notByte);
3468   __ cmpl(tos_state, ztos);
3469   __ jcc(Assembler::notEqual, notBool);
3470 
3471   // ztos
3472   {
3473     __ pop(ztos);
3474     if (!is_static) pop_and_check_object(obj);
3475     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg, noreg);
3476     if (!is_static && rc == may_rewrite) {
3477       patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
3478     }
3479     __ jmp(Done);
3480   }
3481 
3482   __ bind(notBool);
3483   __ cmpl(tos_state, atos);
3484   __ jcc(Assembler::notEqual, notObj);
3485 
3486   // atos
3487   {
3488     if (!EnableValhalla) {
3489       __ pop(atos);
3490       if (!is_static) pop_and_check_object(obj);
3491       // Store into the field
3492       do_oop_store(_masm, field, rax);
3493       if (!is_static && rc == may_rewrite) {
3494         patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3495       }
3496       __ jmp(Done);
3497     } else {
3498       __ pop(atos);
3499       if (is_static) {
3500         Label is_inline_type;
3501         __ test_field_is_not_null_free_inline_type(flags, rscratch1, is_inline_type);
3502         __ null_check(rax);
3503         __ bind(is_inline_type);
3504         do_oop_store(_masm, field, rax);
3505         __ jmp(Done);
3506       } else {
3507         Label is_null_free_inline_type, is_flat, has_null_marker,
3508               write_null, rewrite_not_inline, rewrite_inline;
3509         __ test_field_is_null_free_inline_type(flags, rscratch1, is_null_free_inline_type);
3510         __ test_field_has_null_marker(flags, rscratch1, has_null_marker);
3511           // Not an inline type
3512           pop_and_check_object(obj);
3513           // Store into the field
3514           do_oop_store(_masm, field, rax);
3515           __ bind(rewrite_not_inline);
3516           if (rc == may_rewrite) {
3517             patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3518           }
3519           __ jmp(Done);
3520         // Implementation of the inline type semantic
3521         __ bind(is_null_free_inline_type);
3522           __ null_check(rax);
3523           __ test_field_is_flat(flags, rscratch1, is_flat);
3524             // field is not flat
3525             pop_and_check_object(obj);
3526             // Store into the field
3527             do_oop_store(_masm, field, rax);
3528           __ jmp(rewrite_inline);
3529           __ bind(is_flat);
3530             // field is flat
3531             __ load_unsigned_short(rdx, Address(rcx, in_bytes(ResolvedFieldEntry::field_index_offset())));
3532             __ movptr(r9, Address(rcx, in_bytes(ResolvedFieldEntry::field_holder_offset())));
3533             pop_and_check_object(obj);  // obj = rcx
3534             __ load_klass(r8, rax, rscratch1);
3535             __ payload_addr(rax, rax, r8);
3536             __ addptr(obj, off);
3537             __ inline_layout_info(r9, rdx, rbx);
3538             // because we use InlineLayoutInfo, we need special value access code specialized for fields (arrays will need a different API)
3539             __ flat_field_copy(IN_HEAP, rax, obj, rbx);
3540             __ jmp(rewrite_inline);
3541         __ bind(has_null_marker); // has null marker means the field is flat with a null marker
3542           pop_and_check_object(rbx);
3543           __ load_field_entry(rcx, rdx);
3544           call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_nullable_flat_field), rbx, rax, rcx);
3545         __ bind(rewrite_inline);
3546         if (rc == may_rewrite) {
3547           patch_bytecode(Bytecodes::_fast_vputfield, bc, rbx, true, byte_no);
3548         }
3549         __ jmp(Done);
3550       }
3551     }
3552   }
3553 
3554   __ bind(notObj);
3555   __ cmpl(tos_state, itos);
3556   __ jcc(Assembler::notEqual, notInt);
3557 
3558   // itos
3559   {
3560     __ pop(itos);
3561     if (!is_static) pop_and_check_object(obj);
3562     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg, noreg);
3563     if (!is_static && rc == may_rewrite) {
3564       patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3565     }
3566     __ jmp(Done);
3567   }
3568 
3569   __ bind(notInt);
3570   __ cmpl(tos_state, ctos);
3571   __ jcc(Assembler::notEqual, notChar);
3572 
3573   // ctos
3574   {
3575     __ pop(ctos);
3576     if (!is_static) pop_and_check_object(obj);
3577     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg, noreg);
3578     if (!is_static && rc == may_rewrite) {
3579       patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3580     }
3581     __ jmp(Done);
3582   }
3583 
3584   __ bind(notChar);
3585   __ cmpl(tos_state, stos);
3586   __ jcc(Assembler::notEqual, notShort);
3587 
3588   // stos
3589   {
3590     __ pop(stos);
3591     if (!is_static) pop_and_check_object(obj);
3592     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg, noreg);
3593     if (!is_static && rc == may_rewrite) {
3594       patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3595     }
3596     __ jmp(Done);
3597   }
3598 
3599   __ bind(notShort);
3600   __ cmpl(tos_state, ltos);
3601   __ jcc(Assembler::notEqual, notLong);
3602 
3603   // ltos
3604   {
3605     __ pop(ltos);
3606     if (!is_static) pop_and_check_object(obj);
3607     // MO_RELAXED: generate atomic store for the case of volatile field (important for x86_32)
3608     __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos*/, noreg, noreg, noreg);
3609 #ifdef _LP64
3610     if (!is_static && rc == may_rewrite) {
3611       patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3612     }
3613 #endif // _LP64
3614     __ jmp(Done);
3615   }
3616 
3617   __ bind(notLong);
3618   __ cmpl(tos_state, ftos);
3619   __ jcc(Assembler::notEqual, notFloat);
3620 
3621   // ftos
3622   {
3623     __ pop(ftos);
3624     if (!is_static) pop_and_check_object(obj);
3625     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3626     if (!is_static && rc == may_rewrite) {
3627       patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3628     }
3629     __ jmp(Done);
3630   }
3631 
3632   __ bind(notFloat);
3633 #ifdef ASSERT
3634   Label notDouble;
3635   __ cmpl(tos_state, dtos);
3636   __ jcc(Assembler::notEqual, notDouble);
3637 #endif
3638 
3639   // dtos
3640   {
3641     __ pop(dtos);
3642     if (!is_static) pop_and_check_object(obj);
3643     // MO_RELAXED: for the case of volatile field, in fact it adds no extra work for the underlying implementation
3644     __ access_store_at(T_DOUBLE, IN_HEAP | MO_RELAXED, field, noreg /* dtos */, noreg, noreg, noreg);
3645     if (!is_static && rc == may_rewrite) {
3646       patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3647     }
3648   }
3649 
3650 #ifdef ASSERT
3651   __ jmp(Done);
3652 
3653   __ bind(notDouble);
3654   __ stop("Bad state");
3655 #endif
3656 
3657   __ bind(Done);
3658 }
3659 
3660 void TemplateTable::putfield(int byte_no) {
3661   putfield_or_static(byte_no, false);
3662 }
3663 
3664 void TemplateTable::nofast_putfield(int byte_no) {
3665   putfield_or_static(byte_no, false, may_not_rewrite);
3666 }
3667 
3668 void TemplateTable::putstatic(int byte_no) {
3669   putfield_or_static(byte_no, true);
3670 }
3671 
3672 void TemplateTable::jvmti_post_fast_field_mod() {
3673 
3674   const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3675 
3676   if (JvmtiExport::can_post_field_modification()) {
3677     // Check to see if a field modification watch has been set before
3678     // we take the time to call into the VM.
3679     Label L2;
3680     __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3681     __ testl(scratch, scratch);
3682     __ jcc(Assembler::zero, L2);
3683     __ pop_ptr(rbx);                  // copy the object pointer from tos
3684     __ verify_oop(rbx);
3685     __ push_ptr(rbx);                 // put the object pointer back on tos
3686     // Save tos values before call_VM() clobbers them. Since we have
3687     // to do it for every data type, we use the saved values as the
3688     // jvalue object.
3689     switch (bytecode()) {          // load values into the jvalue object
3690     case Bytecodes::_fast_vputfield: //fall through
3691     case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3692     case Bytecodes::_fast_bputfield: // fall through
3693     case Bytecodes::_fast_zputfield: // fall through
3694     case Bytecodes::_fast_sputfield: // fall through
3695     case Bytecodes::_fast_cputfield: // fall through
3696     case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3697     case Bytecodes::_fast_dputfield: __ push(dtos); break;
3698     case Bytecodes::_fast_fputfield: __ push(ftos); break;
3699     case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3700 
3701     default:
3702       ShouldNotReachHere();
3703     }
3704     __ mov(scratch, rsp);             // points to jvalue on the stack
3705     // access constant pool cache entry
3706     LP64_ONLY(__ load_field_entry(c_rarg2, rax));
3707     NOT_LP64(__ load_field_entry(rax, rdx));
3708     __ verify_oop(rbx);
3709     // rbx: object pointer copied above
3710     // c_rarg2: cache entry pointer
3711     // c_rarg3: jvalue object on the stack
3712     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3713     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3714 
3715     switch (bytecode()) {             // restore tos values
3716     case Bytecodes::_fast_vputfield: // fall through
3717     case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3718     case Bytecodes::_fast_bputfield: // fall through
3719     case Bytecodes::_fast_zputfield: // fall through
3720     case Bytecodes::_fast_sputfield: // fall through
3721     case Bytecodes::_fast_cputfield: // fall through
3722     case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3723     case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3724     case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3725     case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3726     default: break;
3727     }
3728     __ bind(L2);
3729   }
3730 }
3731 
3732 void TemplateTable::fast_storefield(TosState state) {
3733   transition(state, vtos);
3734 
3735   Label notVolatile, Done;
3736 
3737   jvmti_post_fast_field_mod();
3738 
3739   __ push(rax);
3740   __ load_field_entry(rcx, rax);
3741   load_resolved_field_entry(noreg, rcx, rax, rbx, rdx);
3742   __ pop(rax);
3743   // RBX: field offset, RCX: RAX: TOS, RDX: flags
3744 
3745   // Get object from stack
3746   pop_and_check_object(rcx);
3747 
3748   // field address
3749   const Address field(rcx, rbx, Address::times_1);
3750 
3751   // Check for volatile store
3752   __ movl(rscratch2, rdx);  // saving flags for is_flat test
3753   __ andl(rscratch2, (1 << ResolvedFieldEntry::is_volatile_shift));
3754   __ testl(rscratch2, rscratch2);
3755   __ jcc(Assembler::zero, notVolatile);
3756 
3757   fast_storefield_helper(field, rax, rdx);
3758   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3759                                                Assembler::StoreStore));
3760   __ jmp(Done);
3761   __ bind(notVolatile);
3762 
3763   fast_storefield_helper(field, rax, rdx);
3764 
3765   __ bind(Done);
3766 }
3767 
3768 void TemplateTable::fast_storefield_helper(Address field, Register rax, Register flags) {
3769 
3770   // DANGER: 'field' argument depends on rcx and rbx
3771 
3772   // access field
3773   switch (bytecode()) {
3774   case Bytecodes::_fast_vputfield:
3775     {
3776       Label is_flat, has_null_marker, write_null, done;
3777       __ test_field_has_null_marker(flags, rscratch1, has_null_marker);
3778       // Null free field cases: flat or not flat
3779       __ null_check(rax);
3780       __ test_field_is_flat(flags, rscratch1, is_flat);
3781         // field is not flat
3782         do_oop_store(_masm, field, rax);
3783         __ jmp(done);
3784       __ bind(is_flat);
3785         __ load_field_entry(r8, r9);
3786         __ load_unsigned_short(r9, Address(r8, in_bytes(ResolvedFieldEntry::field_index_offset())));
3787         __ movptr(r8, Address(r8, in_bytes(ResolvedFieldEntry::field_holder_offset())));
3788         __ inline_layout_info(r8, r9, r8);
3789         __ load_klass(rdx, rax, rscratch1);
3790         __ payload_addr(rax, rax, rdx);
3791         __ lea(rcx, field);
3792         __ flat_field_copy(IN_HEAP, rax, rcx, r8);
3793         __ jmp(done);
3794       __ bind(has_null_marker); // has null marker means the field is flat with a null marker
3795         __ movptr(rbx, rcx);
3796         __ load_field_entry(rcx, rdx);
3797         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_nullable_flat_field), rbx, rax, rcx);
3798       __ bind(done);
3799     }
3800     break;
3801   case Bytecodes::_fast_aputfield:
3802     {
3803       do_oop_store(_masm, field, rax);
3804     }
3805     break;
3806   case Bytecodes::_fast_lputfield:
3807 #ifdef _LP64
3808     __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg, noreg);
3809 #else
3810   __ stop("should not be rewritten");
3811 #endif
3812     break;
3813   case Bytecodes::_fast_iputfield:
3814     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg, noreg);
3815     break;
3816   case Bytecodes::_fast_zputfield:
3817     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg, noreg);
3818     break;
3819   case Bytecodes::_fast_bputfield:
3820     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg, noreg);
3821     break;
3822   case Bytecodes::_fast_sputfield:
3823     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg, noreg);
3824     break;
3825   case Bytecodes::_fast_cputfield:
3826     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg, noreg);
3827     break;
3828   case Bytecodes::_fast_fputfield:
3829     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos*/, noreg, noreg, noreg);
3830     break;
3831   case Bytecodes::_fast_dputfield:
3832     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos*/, noreg, noreg, noreg);
3833     break;
3834   default:
3835     ShouldNotReachHere();
3836   }
3837 }
3838 
3839 void TemplateTable::fast_accessfield(TosState state) {
3840   transition(atos, state);
3841 
3842   // Do the JVMTI work here to avoid disturbing the register state below
3843   if (JvmtiExport::can_post_field_access()) {
3844     // Check to see if a field access watch has been set before we
3845     // take the time to call into the VM.
3846     Label L1;
3847     __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3848     __ testl(rcx, rcx);
3849     __ jcc(Assembler::zero, L1);
3850     // access constant pool cache entry
3851     LP64_ONLY(__ load_field_entry(c_rarg2, rcx));
3852     NOT_LP64(__ load_field_entry(rcx, rdx));
3853     __ verify_oop(rax);
3854     __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
3855     LP64_ONLY(__ mov(c_rarg1, rax));
3856     // c_rarg1: object pointer copied above
3857     // c_rarg2: cache entry pointer
3858     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
3859     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
3860     __ pop_ptr(rax); // restore object pointer
3861     __ bind(L1);
3862   }
3863 
3864   // access constant pool cache
3865   __ load_field_entry(rcx, rbx);
3866   __ load_sized_value(rdx, Address(rcx, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3867 
3868   // rax: object
3869   __ verify_oop(rax);
3870   __ null_check(rax);
3871   Address field(rax, rdx, Address::times_1);
3872 
3873   // access field
3874   switch (bytecode()) {
3875   case Bytecodes::_fast_vgetfield:
3876     {
3877       Label is_flat, nonnull, Done, has_null_marker;
3878       __ load_unsigned_byte(rscratch1, Address(rcx, in_bytes(ResolvedFieldEntry::flags_offset())));
3879       __ test_field_has_null_marker(rscratch1, rscratch2, has_null_marker);
3880       __ test_field_is_flat(rscratch1, rscratch2, is_flat);
3881         // field is not flat
3882         __ load_heap_oop(rax, field);
3883         __ testptr(rax, rax);
3884         __ jcc(Assembler::notZero, nonnull);
3885           __ load_unsigned_short(rdx, Address(rcx, in_bytes(ResolvedFieldEntry::field_index_offset())));
3886           __ movptr(rcx, Address(rcx, ResolvedFieldEntry::field_holder_offset()));
3887           __ get_inline_type_field_klass(rcx, rdx, rbx);
3888           __ get_default_value_oop(rbx, rcx, rax);
3889         __ bind(nonnull);
3890         __ verify_oop(rax);
3891         __ jmp(Done);
3892       __ bind(is_flat);
3893       // field is flat
3894         __ read_flat_field(rcx, rdx, rbx, rax);
3895         __ jmp(Done);
3896       __ bind(has_null_marker);
3897         // rax = instance, rcx = resolved entry
3898         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_nullable_flat_field), rax, rcx);
3899         __ get_vm_result(rax, r15_thread);
3900       __ bind(Done);
3901       __ verify_oop(rax);
3902     }
3903     break;
3904   case Bytecodes::_fast_agetfield:
3905     do_oop_load(_masm, field, rax);
3906     __ verify_oop(rax);
3907     break;
3908   case Bytecodes::_fast_lgetfield:
3909 #ifdef _LP64
3910     __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
3911 #else
3912   __ stop("should not be rewritten");
3913 #endif
3914     break;
3915   case Bytecodes::_fast_igetfield:
3916     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3917     break;
3918   case Bytecodes::_fast_bgetfield:
3919     __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3920     break;
3921   case Bytecodes::_fast_sgetfield:
3922     __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3923     break;
3924   case Bytecodes::_fast_cgetfield:
3925     __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3926     break;
3927   case Bytecodes::_fast_fgetfield:
3928     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3929     break;
3930   case Bytecodes::_fast_dgetfield:
3931     __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3932     break;
3933   default:
3934     ShouldNotReachHere();
3935   }
3936   // [jk] not needed currently
3937   //   Label notVolatile;
3938   //   __ testl(rdx, rdx);
3939   //   __ jcc(Assembler::zero, notVolatile);
3940   //   __ membar(Assembler::LoadLoad);
3941   //   __ bind(notVolatile);
3942 }
3943 
3944 void TemplateTable::fast_xaccess(TosState state) {
3945   transition(vtos, state);
3946 
3947   // get receiver
3948   __ movptr(rax, aaddress(0));
3949   // access constant pool cache
3950   __ load_field_entry(rcx, rdx, 2);
3951   __ load_sized_value(rbx, Address(rcx, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3952 
3953   // make sure exception is reported in correct bcp range (getfield is
3954   // next instruction)
3955   __ increment(rbcp);
3956   __ null_check(rax);
3957   const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
3958   switch (state) {
3959   case itos:
3960     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3961     break;
3962   case atos:
3963     do_oop_load(_masm, field, rax);
3964     __ verify_oop(rax);
3965     break;
3966   case ftos:
3967     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3968     break;
3969   default:
3970     ShouldNotReachHere();
3971   }
3972 
3973   // [jk] not needed currently
3974   // Label notVolatile;
3975   // __ movl(rdx, Address(rcx, rdx, Address::times_8,
3976   //                      in_bytes(ConstantPoolCache::base_offset() +
3977   //                               ConstantPoolCacheEntry::flags_offset())));
3978   // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3979   // __ testl(rdx, 0x1);
3980   // __ jcc(Assembler::zero, notVolatile);
3981   // __ membar(Assembler::LoadLoad);
3982   // __ bind(notVolatile);
3983 
3984   __ decrement(rbcp);
3985 }
3986 
3987 //-----------------------------------------------------------------------------
3988 // Calls
3989 
3990 void TemplateTable::prepare_invoke(Register cache, Register recv, Register flags) {
3991   // determine flags
3992   const Bytecodes::Code code = bytecode();
3993   const bool load_receiver       = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);
3994   assert_different_registers(recv, flags);
3995 
3996   // save 'interpreter return address'
3997   __ save_bcp();
3998 
3999   // Save flags and load TOS
4000   __ movl(rbcp, flags);
4001   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));
4002 
4003   // load receiver if needed (after appendix is pushed so parameter size is correct)
4004   // Note: no return address pushed yet
4005   if (load_receiver) {
4006     __ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
4007     const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
4008     const int receiver_is_at_end      = -1;  // back off one slot to get receiver
4009     Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
4010     __ movptr(recv, recv_addr);
4011     __ verify_oop(recv);
4012   }
4013 
4014   // load return address
4015   {
4016     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
4017     ExternalAddress table(table_addr);
4018 #ifdef _LP64
4019     __ lea(rscratch1, table);
4020     __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
4021 #else
4022     __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
4023 #endif // _LP64
4024   }
4025 
4026   // push return address
4027   __ push(flags);
4028 
4029   // Restore flags value from the constant pool cache entry, and restore rsi
4030   // for later null checks.  r13 is the bytecode pointer
4031   __ movl(flags, rbcp);
4032   __ restore_bcp();
4033 }
4034 
4035 void TemplateTable::invokevirtual_helper(Register index,
4036                                          Register recv,
4037                                          Register flags) {
4038   // Uses temporary registers rax, rdx
4039   assert_different_registers(index, recv, rax, rdx);
4040   assert(index == rbx, "");
4041   assert(recv  == rcx, "");
4042 
4043   // Test for an invoke of a final method
4044   Label notFinal;
4045   __ movl(rax, flags);
4046   __ andl(rax, (1 << ResolvedMethodEntry::is_vfinal_shift));
4047   __ jcc(Assembler::zero, notFinal);
4048 
4049   const Register method = index;  // method must be rbx
4050   assert(method == rbx,
4051          "Method* must be rbx for interpreter calling convention");
4052 
4053   // do the call - the index is actually the method to call
4054   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
4055 
4056   // It's final, need a null check here!
4057   __ null_check(recv);
4058 
4059   // profile this call
4060   __ profile_final_call(rax);
4061   __ profile_arguments_type(rax, method, rbcp, true);
4062 
4063   __ jump_from_interpreted(method, rax);
4064 
4065   __ bind(notFinal);
4066 
4067   // get receiver klass
4068   __ load_klass(rax, recv, rscratch1);
4069 
4070   // profile this call
4071   __ profile_virtual_call(rax, rlocals, rdx);
4072   // get target Method* & entry point
4073   __ lookup_virtual_method(rax, index, method);
4074 
4075   __ profile_arguments_type(rdx, method, rbcp, true);
4076   __ jump_from_interpreted(method, rdx);
4077 }
4078 
4079 void TemplateTable::invokevirtual(int byte_no) {
4080   transition(vtos, vtos);
4081   assert(byte_no == f2_byte, "use this argument");
4082 
4083   load_resolved_method_entry_virtual(rcx,  // ResolvedMethodEntry*
4084                                      rbx,  // Method or itable index
4085                                      rdx); // Flags
4086   prepare_invoke(rcx,  // ResolvedMethodEntry*
4087                  rcx,  // Receiver
4088                  rdx); // flags
4089 
4090   // rbx: index
4091   // rcx: receiver
4092   // rdx: flags
4093   invokevirtual_helper(rbx, rcx, rdx);
4094 }
4095 
4096 void TemplateTable::invokespecial(int byte_no) {
4097   transition(vtos, vtos);
4098   assert(byte_no == f1_byte, "use this argument");
4099 
4100   load_resolved_method_entry_special_or_static(rcx,  // ResolvedMethodEntry*
4101                                                rbx,  // Method*
4102                                                rdx); // flags
4103   prepare_invoke(rcx,
4104                  rcx,  // get receiver also for null check
4105                  rdx); // flags
4106 
4107   __ verify_oop(rcx);
4108   __ null_check(rcx);
4109   // do the call
4110   __ profile_call(rax);
4111   __ profile_arguments_type(rax, rbx, rbcp, false);
4112   __ jump_from_interpreted(rbx, rax);
4113 }
4114 
4115 void TemplateTable::invokestatic(int byte_no) {
4116   transition(vtos, vtos);
4117   assert(byte_no == f1_byte, "use this argument");
4118 
4119   load_resolved_method_entry_special_or_static(rcx, // ResolvedMethodEntry*
4120                                                rbx, // Method*
4121                                                rdx  // flags
4122                                                );
4123   prepare_invoke(rcx, rcx, rdx);  // cache and flags
4124 
4125   // do the call
4126   __ profile_call(rax);
4127   __ profile_arguments_type(rax, rbx, rbcp, false);
4128   __ jump_from_interpreted(rbx, rax);
4129 }
4130 
4131 
4132 void TemplateTable::fast_invokevfinal(int byte_no) {
4133   transition(vtos, vtos);
4134   assert(byte_no == f2_byte, "use this argument");
4135   __ stop("fast_invokevfinal not used on x86");
4136 }
4137 
4138 
4139 void TemplateTable::invokeinterface(int byte_no) {
4140   transition(vtos, vtos);
4141   assert(byte_no == f1_byte, "use this argument");
4142 
4143   load_resolved_method_entry_interface(rcx,  // ResolvedMethodEntry*
4144                                        rax,  // Klass*
4145                                        rbx,  // Method* or itable/vtable index
4146                                        rdx); // flags
4147   prepare_invoke(rcx, rcx, rdx); // receiver, flags
4148 
4149   // First check for Object case, then private interface method,
4150   // then regular interface method.
4151 
4152   // Special case of invokeinterface called for virtual method of
4153   // java.lang.Object.  See cpCache.cpp for details.
4154   Label notObjectMethod;
4155   __ movl(rlocals, rdx);
4156   __ andl(rlocals, (1 << ResolvedMethodEntry::is_forced_virtual_shift));
4157   __ jcc(Assembler::zero, notObjectMethod);
4158 
4159   invokevirtual_helper(rbx, rcx, rdx);
4160   // no return from above
4161   __ bind(notObjectMethod);
4162 
4163   Label no_such_interface; // for receiver subtype check
4164   Register recvKlass; // used for exception processing
4165 
4166   // Check for private method invocation - indicated by vfinal
4167   Label notVFinal;
4168   __ movl(rlocals, rdx);
4169   __ andl(rlocals, (1 << ResolvedMethodEntry::is_vfinal_shift));
4170   __ jcc(Assembler::zero, notVFinal);
4171 
4172   // Get receiver klass into rlocals - also a null check
4173   __ load_klass(rlocals, rcx, rscratch1);
4174 
4175   Label subtype;
4176   __ check_klass_subtype(rlocals, rax, rbcp, subtype);
4177   // If we get here the typecheck failed
4178   recvKlass = rdx;
4179   __ mov(recvKlass, rlocals); // shuffle receiver class for exception use
4180   __ jmp(no_such_interface);
4181 
4182   __ bind(subtype);
4183 
4184   // do the call - rbx is actually the method to call
4185 
4186   __ profile_final_call(rdx);
4187   __ profile_arguments_type(rdx, rbx, rbcp, true);
4188 
4189   __ jump_from_interpreted(rbx, rdx);
4190   // no return from above
4191   __ bind(notVFinal);
4192 
4193   // Get receiver klass into rdx - also a null check
4194   __ restore_locals();  // restore r14
4195   __ load_klass(rdx, rcx, rscratch1);
4196 
4197   Label no_such_method;
4198 
4199   // Preserve method for throw_AbstractMethodErrorVerbose.
4200   __ mov(rcx, rbx);
4201   // Receiver subtype check against REFC.
4202   // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
4203   __ lookup_interface_method(// inputs: rec. class, interface, itable index
4204                              rdx, rax, noreg,
4205                              // outputs: scan temp. reg, scan temp. reg
4206                              rbcp, rlocals,
4207                              no_such_interface,
4208                              /*return_method=*/false);
4209 
4210   // profile this call
4211   __ restore_bcp(); // rbcp was destroyed by receiver type check
4212   __ profile_virtual_call(rdx, rbcp, rlocals);
4213 
4214   // Get declaring interface class from method, and itable index
4215   __ load_method_holder(rax, rbx);
4216   __ movl(rbx, Address(rbx, Method::itable_index_offset()));
4217   __ subl(rbx, Method::itable_index_max);
4218   __ negl(rbx);
4219 
4220   // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
4221   __ mov(rlocals, rdx);
4222   __ lookup_interface_method(// inputs: rec. class, interface, itable index
4223                              rlocals, rax, rbx,
4224                              // outputs: method, scan temp. reg
4225                              rbx, rbcp,
4226                              no_such_interface);
4227 
4228   // rbx: Method* to call
4229   // rcx: receiver
4230   // Check for abstract method error
4231   // Note: This should be done more efficiently via a throw_abstract_method_error
4232   //       interpreter entry point and a conditional jump to it in case of a null
4233   //       method.
4234   __ testptr(rbx, rbx);
4235   __ jcc(Assembler::zero, no_such_method);
4236 
4237   __ profile_arguments_type(rdx, rbx, rbcp, true);
4238 
4239   // do the call
4240   // rcx: receiver
4241   // rbx,: Method*
4242   __ jump_from_interpreted(rbx, rdx);
4243   __ should_not_reach_here();
4244 
4245   // exception handling code follows...
4246   // note: must restore interpreter registers to canonical
4247   //       state for exception handling to work correctly!
4248 
4249   __ bind(no_such_method);
4250   // throw exception
4251   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
4252   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
4253   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
4254   // Pass arguments for generating a verbose error message.
4255 #ifdef _LP64
4256   recvKlass = c_rarg1;
4257   Register method    = c_rarg2;
4258   if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
4259   if (method != rcx)    { __ movq(method, rcx);    }
4260 #else
4261   recvKlass = rdx;
4262   Register method    = rcx;
4263 #endif
4264   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
4265              recvKlass, method);
4266   // The call_VM checks for exception, so we should never return here.
4267   __ should_not_reach_here();
4268 
4269   __ bind(no_such_interface);
4270   // throw exception
4271   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
4272   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
4273   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
4274   // Pass arguments for generating a verbose error message.
4275   LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
4276   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
4277              recvKlass, rax);
4278   // the call_VM checks for exception, so we should never return here.
4279   __ should_not_reach_here();
4280 }
4281 
4282 void TemplateTable::invokehandle(int byte_no) {
4283   transition(vtos, vtos);
4284   assert(byte_no == f1_byte, "use this argument");
4285   const Register rbx_method = rbx;
4286   const Register rax_mtype  = rax;
4287   const Register rcx_recv   = rcx;
4288   const Register rdx_flags  = rdx;
4289 
4290   load_resolved_method_entry_handle(rcx, rbx_method, rax_mtype, rdx_flags);
4291   prepare_invoke(rcx, rcx_recv, rdx_flags);
4292 
4293   __ verify_method_ptr(rbx_method);
4294   __ verify_oop(rcx_recv);
4295   __ null_check(rcx_recv);
4296 
4297   // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
4298   // rbx: MH.invokeExact_MT method
4299 
4300   // Note:  rax_mtype is already pushed (if necessary)
4301 
4302   // FIXME: profile the LambdaForm also
4303   __ profile_final_call(rax);
4304   __ profile_arguments_type(rdx, rbx_method, rbcp, true);
4305 
4306   __ jump_from_interpreted(rbx_method, rdx);
4307 }
4308 
4309 void TemplateTable::invokedynamic(int byte_no) {
4310   transition(vtos, vtos);
4311   assert(byte_no == f1_byte, "use this argument");
4312 
4313   const Register rbx_method   = rbx;
4314   const Register rax_callsite = rax;
4315 
4316   load_invokedynamic_entry(rbx_method);
4317   // rax: CallSite object (from cpool->resolved_references[])
4318   // rbx: MH.linkToCallSite method
4319 
4320   // Note:  rax_callsite is already pushed
4321 
4322   // %%% should make a type profile for any invokedynamic that takes a ref argument
4323   // profile this call
4324   __ profile_call(rbcp);
4325   __ profile_arguments_type(rdx, rbx_method, rbcp, false);
4326 
4327   __ verify_oop(rax_callsite);
4328 
4329   __ jump_from_interpreted(rbx_method, rdx);
4330 }
4331 
4332 //-----------------------------------------------------------------------------
4333 // Allocation
4334 
4335 void TemplateTable::_new() {
4336   transition(vtos, atos);
4337   __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
4338   Label slow_case;
4339   Label done;
4340 
4341   __ get_cpool_and_tags(rcx, rax);
4342 
4343   // Make sure the class we're about to instantiate has been resolved.
4344   // This is done before loading InstanceKlass to be consistent with the order
4345   // how Constant Pool is updated (see ConstantPool::klass_at_put)
4346   const int tags_offset = Array<u1>::base_offset_in_bytes();
4347   __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
4348   __ jcc(Assembler::notEqual, slow_case);
4349 
4350   // get InstanceKlass
4351   __ load_resolved_klass_at_index(rcx, rcx, rdx);
4352 
4353   // make sure klass is initialized
4354   // init_state needs acquire, but x86 is TSO, and so we are already good.
4355 #ifdef _LP64
4356   assert(VM_Version::supports_fast_class_init_checks(), "must support fast class initialization checks");
4357   __ clinit_barrier(rcx, r15_thread, nullptr /*L_fast_path*/, &slow_case);
4358 #else
4359   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
4360   __ jcc(Assembler::notEqual, slow_case);
4361 #endif
4362 
4363   __ allocate_instance(rcx, rax, rdx, rbx, true, slow_case);
4364     if (DTraceAllocProbes) {
4365       // Trigger dtrace event for fastpath
4366       __ push(atos);
4367       __ call_VM_leaf(
4368            CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), rax);
4369       __ pop(atos);
4370     }
4371   __ jmp(done);
4372 
4373   // slow case
4374   __ bind(slow_case);
4375 
4376   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4377   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4378 
4379   __ get_constant_pool(rarg1);
4380   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4381   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
4382    __ verify_oop(rax);
4383 
4384   // continue
4385   __ bind(done);
4386 }
4387 
4388 void TemplateTable::newarray() {
4389   transition(itos, atos);
4390   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4391   __ load_unsigned_byte(rarg1, at_bcp(1));
4392   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
4393           rarg1, rax);
4394 }
4395 
4396 void TemplateTable::anewarray() {
4397   transition(itos, atos);
4398 
4399   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4400   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4401 
4402   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4403   __ get_constant_pool(rarg1);
4404   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
4405           rarg1, rarg2, rax);
4406 }
4407 
4408 void TemplateTable::arraylength() {
4409   transition(atos, itos);
4410   __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
4411 }
4412 
4413 void TemplateTable::checkcast() {
4414   transition(atos, atos);
4415   Label done, is_null, ok_is_subtype, quicked, resolved;
4416   __ testptr(rax, rax); // object is in rax
4417   __ jcc(Assembler::zero, is_null);
4418 
4419   // Get cpool & tags index
4420   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4421   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4422   // See if bytecode has already been quicked
4423   __ movzbl(rdx, Address(rdx, rbx,
4424       Address::times_1,
4425       Array<u1>::base_offset_in_bytes()));
4426   __ cmpl(rdx, JVM_CONSTANT_Class);
4427   __ jcc(Assembler::equal, quicked);
4428   __ push(atos); // save receiver for result, and for GC
4429   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4430 
4431   // vm_result_2 has metadata result
4432 #ifndef _LP64
4433   // borrow rdi from locals
4434   __ get_thread(rdi);
4435   __ get_vm_result_2(rax, rdi);
4436   __ restore_locals();
4437 #else
4438   __ get_vm_result_2(rax, r15_thread);
4439 #endif
4440 
4441   __ pop_ptr(rdx); // restore receiver
4442   __ jmpb(resolved);
4443 
4444   // Get superklass in rax and subklass in rbx
4445   __ bind(quicked);
4446   __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
4447   __ load_resolved_klass_at_index(rax, rcx, rbx);
4448 
4449   __ bind(resolved);
4450   __ load_klass(rbx, rdx, rscratch1);
4451 
4452   // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
4453   // Superklass in rax.  Subklass in rbx.
4454   __ gen_subtype_check(rbx, ok_is_subtype);
4455 
4456   // Come here on failure
4457   __ push_ptr(rdx);
4458   // object is at TOS
4459   __ jump(RuntimeAddress(Interpreter::_throw_ClassCastException_entry));
4460 
4461   // Come here on success
4462   __ bind(ok_is_subtype);
4463   __ mov(rax, rdx); // Restore object in rdx
4464   __ jmp(done);
4465 
4466   __ bind(is_null);
4467 
4468   // Collect counts on whether this check-cast sees nulls a lot or not.
4469   if (ProfileInterpreter) {
4470     __ profile_null_seen(rcx);
4471   }
4472 
4473   __ bind(done);
4474 }
4475 
4476 void TemplateTable::instanceof() {
4477   transition(atos, itos);
4478   Label done, is_null, ok_is_subtype, quicked, resolved;
4479   __ testptr(rax, rax);
4480   __ jcc(Assembler::zero, is_null);
4481 
4482   // Get cpool & tags index
4483   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4484   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4485   // See if bytecode has already been quicked
4486   __ movzbl(rdx, Address(rdx, rbx,
4487         Address::times_1,
4488         Array<u1>::base_offset_in_bytes()));
4489   __ cmpl(rdx, JVM_CONSTANT_Class);
4490   __ jcc(Assembler::equal, quicked);
4491 
4492   __ push(atos); // save receiver for result, and for GC
4493   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4494   // vm_result_2 has metadata result
4495 
4496 #ifndef _LP64
4497   // borrow rdi from locals
4498   __ get_thread(rdi);
4499   __ get_vm_result_2(rax, rdi);
4500   __ restore_locals();
4501 #else
4502   __ get_vm_result_2(rax, r15_thread);
4503 #endif
4504 
4505   __ pop_ptr(rdx); // restore receiver
4506   __ verify_oop(rdx);
4507   __ load_klass(rdx, rdx, rscratch1);
4508   __ jmpb(resolved);
4509 
4510   // Get superklass in rax and subklass in rdx
4511   __ bind(quicked);
4512   __ load_klass(rdx, rax, rscratch1);
4513   __ load_resolved_klass_at_index(rax, rcx, rbx);
4514 
4515   __ bind(resolved);
4516 
4517   // Generate subtype check.  Blows rcx, rdi
4518   // Superklass in rax.  Subklass in rdx.
4519   __ gen_subtype_check(rdx, ok_is_subtype);
4520 
4521   // Come here on failure
4522   __ xorl(rax, rax);
4523   __ jmpb(done);
4524   // Come here on success
4525   __ bind(ok_is_subtype);
4526   __ movl(rax, 1);
4527 
4528   // Collect counts on whether this test sees nulls a lot or not.
4529   if (ProfileInterpreter) {
4530     __ jmp(done);
4531     __ bind(is_null);
4532     __ profile_null_seen(rcx);
4533   } else {
4534     __ bind(is_null);   // same as 'done'
4535   }
4536   __ bind(done);
4537   // rax = 0: obj == nullptr or  obj is not an instanceof the specified klass
4538   // rax = 1: obj != nullptr and obj is     an instanceof the specified klass
4539 }
4540 
4541 //----------------------------------------------------------------------------------------------------
4542 // Breakpoints
4543 void TemplateTable::_breakpoint() {
4544   // Note: We get here even if we are single stepping..
4545   // jbug insists on setting breakpoints at every bytecode
4546   // even if we are in single step mode.
4547 
4548   transition(vtos, vtos);
4549 
4550   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4551 
4552   // get the unpatched byte code
4553   __ get_method(rarg);
4554   __ call_VM(noreg,
4555              CAST_FROM_FN_PTR(address,
4556                               InterpreterRuntime::get_original_bytecode_at),
4557              rarg, rbcp);
4558   __ mov(rbx, rax);  // why?
4559 
4560   // post the breakpoint event
4561   __ get_method(rarg);
4562   __ call_VM(noreg,
4563              CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4564              rarg, rbcp);
4565 
4566   // complete the execution of original bytecode
4567   __ dispatch_only_normal(vtos);
4568 }
4569 
4570 //-----------------------------------------------------------------------------
4571 // Exceptions
4572 
4573 void TemplateTable::athrow() {
4574   transition(atos, vtos);
4575   __ null_check(rax);
4576   __ jump(RuntimeAddress(Interpreter::throw_exception_entry()));
4577 }
4578 
4579 //-----------------------------------------------------------------------------
4580 // Synchronization
4581 //
4582 // Note: monitorenter & exit are symmetric routines; which is reflected
4583 //       in the assembly code structure as well
4584 //
4585 // Stack layout:
4586 //
4587 // [expressions  ] <--- rsp               = expression stack top
4588 // ..
4589 // [expressions  ]
4590 // [monitor entry] <--- monitor block top = expression stack bot
4591 // ..
4592 // [monitor entry]
4593 // [frame data   ] <--- monitor block bot
4594 // ...
4595 // [saved rbp    ] <--- rbp
4596 void TemplateTable::monitorenter() {
4597   transition(atos, vtos);
4598 
4599   // check for null object
4600   __ null_check(rax);
4601 
4602   Label is_inline_type;
4603   __ movptr(rbx, Address(rax, oopDesc::mark_offset_in_bytes()));
4604   __ test_markword_is_inline_type(rbx, is_inline_type);
4605 
4606   const Address monitor_block_top(
4607         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4608   const Address monitor_block_bot(
4609         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4610   const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
4611 
4612   Label allocated;
4613 
4614   Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
4615   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4616   Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4617 
4618   // initialize entry pointer
4619   __ xorl(rmon, rmon); // points to free slot or null
4620 
4621   // find a free slot in the monitor block (result in rmon)
4622   {
4623     Label entry, loop, exit;
4624     __ movptr(rtop, monitor_block_top); // derelativize pointer
4625     __ lea(rtop, Address(rbp, rtop, Address::times_ptr));
4626     // rtop points to current entry, starting with top-most entry
4627 
4628     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4629                                         // of monitor block
4630     __ jmpb(entry);
4631 
4632     __ bind(loop);
4633     // check if current entry is used
4634     __ cmpptr(Address(rtop, BasicObjectLock::obj_offset()), NULL_WORD);
4635     // if not used then remember entry in rmon
4636     __ cmovptr(Assembler::equal, rmon, rtop);   // cmov => cmovptr
4637     // check if current entry is for same object
4638     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset()));
4639     // if same object then stop searching
4640     __ jccb(Assembler::equal, exit);
4641     // otherwise advance to next entry
4642     __ addptr(rtop, entry_size);
4643     __ bind(entry);
4644     // check if bottom reached
4645     __ cmpptr(rtop, rbot);
4646     // if not at bottom then check this entry
4647     __ jcc(Assembler::notEqual, loop);
4648     __ bind(exit);
4649   }
4650 
4651   __ testptr(rmon, rmon); // check if a slot has been found
4652   __ jcc(Assembler::notZero, allocated); // if found, continue with that one
4653 
4654   // allocate one if there's no free slot
4655   {
4656     Label entry, loop;
4657     // 1. compute new pointers          // rsp: old expression stack top
4658     __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
4659     __ lea(rmon, Address(rbp, rmon, Address::times_ptr));
4660     __ subptr(rsp, entry_size);         // move expression stack top
4661     __ subptr(rmon, entry_size);        // move expression stack bottom
4662     __ mov(rtop, rsp);                  // set start value for copy loop
4663     __ subptr(monitor_block_bot, entry_size / wordSize); // set new monitor block bottom
4664     __ jmp(entry);
4665     // 2. move expression stack contents
4666     __ bind(loop);
4667     __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
4668                                                 // word from old location
4669     __ movptr(Address(rtop, 0), rbot);          // and store it at new location
4670     __ addptr(rtop, wordSize);                  // advance to next word
4671     __ bind(entry);
4672     __ cmpptr(rtop, rmon);                      // check if bottom reached
4673     __ jcc(Assembler::notEqual, loop);          // if not at bottom then
4674                                                 // copy next word
4675   }
4676 
4677   // call run-time routine
4678   // rmon: points to monitor entry
4679   __ bind(allocated);
4680 
4681   // Increment bcp to point to the next bytecode, so exception
4682   // handling for async. exceptions work correctly.
4683   // The object has already been popped from the stack, so the
4684   // expression stack looks correct.
4685   __ increment(rbcp);
4686 
4687   // store object
4688   __ movptr(Address(rmon, BasicObjectLock::obj_offset()), rax);
4689   __ lock_object(rmon);
4690 
4691   // check to make sure this monitor doesn't cause stack overflow after locking
4692   __ save_bcp();  // in case of exception
4693   __ generate_stack_overflow_check(0);
4694 
4695   // The bcp has already been incremented. Just need to dispatch to
4696   // next instruction.
4697   __ dispatch_next(vtos);
4698 
4699   __ bind(is_inline_type);
4700   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4701                     InterpreterRuntime::throw_identity_exception), rax);
4702   __ should_not_reach_here();
4703 }
4704 
4705 void TemplateTable::monitorexit() {
4706   transition(atos, vtos);
4707 
4708   // check for null object
4709   __ null_check(rax);
4710 
4711   const int is_inline_type_mask = markWord::inline_type_pattern;
4712   Label has_identity;
4713   __ movptr(rbx, Address(rax, oopDesc::mark_offset_in_bytes()));
4714   __ andptr(rbx, is_inline_type_mask);
4715   __ cmpl(rbx, is_inline_type_mask);
4716   __ jcc(Assembler::notEqual, has_identity);
4717   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4718                      InterpreterRuntime::throw_illegal_monitor_state_exception));
4719   __ should_not_reach_here();
4720   __ bind(has_identity);
4721 
4722   const Address monitor_block_top(
4723         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4724   const Address monitor_block_bot(
4725         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4726   const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
4727 
4728   Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4729   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4730 
4731   Label found;
4732 
4733   // find matching slot
4734   {
4735     Label entry, loop;
4736     __ movptr(rtop, monitor_block_top); // derelativize pointer
4737     __ lea(rtop, Address(rbp, rtop, Address::times_ptr));
4738     // rtop points to current entry, starting with top-most entry
4739 
4740     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4741                                         // of monitor block
4742     __ jmpb(entry);
4743 
4744     __ bind(loop);
4745     // check if current entry is for same object
4746     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset()));
4747     // if same object then stop searching
4748     __ jcc(Assembler::equal, found);
4749     // otherwise advance to next entry
4750     __ addptr(rtop, entry_size);
4751     __ bind(entry);
4752     // check if bottom reached
4753     __ cmpptr(rtop, rbot);
4754     // if not at bottom then check this entry
4755     __ jcc(Assembler::notEqual, loop);
4756   }
4757 
4758   // error handling. Unlocking was not block-structured
4759   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4760                    InterpreterRuntime::throw_illegal_monitor_state_exception));
4761   __ should_not_reach_here();
4762 
4763   // call run-time routine
4764   __ bind(found);
4765   __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
4766   __ unlock_object(rtop);
4767   __ pop_ptr(rax); // discard object
4768 }
4769 
4770 // Wide instructions
4771 void TemplateTable::wide() {
4772   transition(vtos, vtos);
4773   __ load_unsigned_byte(rbx, at_bcp(1));
4774   ExternalAddress wtable((address)Interpreter::_wentry_point);
4775   __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)), rscratch1);
4776   // Note: the rbcp increment step is part of the individual wide bytecode implementations
4777 }
4778 
4779 // Multi arrays
4780 void TemplateTable::multianewarray() {
4781   transition(vtos, atos);
4782 
4783   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4784   __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
4785   // last dim is on top of stack; we want address of first one:
4786   // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize
4787   // the latter wordSize to point to the beginning of the array.
4788   __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
4789   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
4790   __ load_unsigned_byte(rbx, at_bcp(3));
4791   __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
4792 }