1 /*
   2  * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "compiler/disassembler.hpp"
  28 #include "gc/shared/collectedHeap.hpp"
  29 #include "gc/shared/tlab_globals.hpp"
  30 #include "interpreter/interpreter.hpp"
  31 #include "interpreter/interpreterRuntime.hpp"
  32 #include "interpreter/interp_masm.hpp"
  33 #include "interpreter/templateTable.hpp"
  34 #include "memory/universe.hpp"
  35 #include "oops/methodData.hpp"
  36 #include "oops/objArrayKlass.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "prims/jvmtiExport.hpp"
  39 #include "prims/methodHandles.hpp"
  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/synchronizer.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
  48 
  49 // Global Register Names
  50 static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
  51 static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
  52 
  53 // Address Computation: local variables
  54 static inline Address iaddress(int n) {
  55   return Address(rlocals, Interpreter::local_offset_in_bytes(n));
  56 }
  57 
  58 static inline Address laddress(int n) {
  59   return iaddress(n + 1);
  60 }
  61 
  62 #ifndef _LP64
  63 static inline Address haddress(int n) {
  64   return iaddress(n + 0);
  65 }
  66 #endif
  67 
  68 static inline Address faddress(int n) {
  69   return iaddress(n);
  70 }
  71 
  72 static inline Address daddress(int n) {
  73   return laddress(n);
  74 }
  75 
  76 static inline Address aaddress(int n) {
  77   return iaddress(n);
  78 }
  79 
  80 static inline Address iaddress(Register r) {
  81   return Address(rlocals, r, Address::times_ptr);
  82 }
  83 
  84 static inline Address laddress(Register r) {
  85   return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
  86 }
  87 
  88 #ifndef _LP64
  89 static inline Address haddress(Register r)       {
  90   return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
  91 }
  92 #endif
  93 
  94 static inline Address faddress(Register r) {
  95   return iaddress(r);
  96 }
  97 
  98 static inline Address daddress(Register r) {
  99   return laddress(r);
 100 }
 101 
 102 static inline Address aaddress(Register r) {
 103   return iaddress(r);
 104 }
 105 
 106 
 107 // expression stack
 108 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
 109 // data beyond the rsp which is potentially unsafe in an MT environment;
 110 // an interrupt may overwrite that data.)
 111 static inline Address at_rsp   () {
 112   return Address(rsp, 0);
 113 }
 114 
 115 // At top of Java expression stack which may be different than esp().  It
 116 // isn't for category 1 objects.
 117 static inline Address at_tos   () {
 118   return Address(rsp,  Interpreter::expr_offset_in_bytes(0));
 119 }
 120 
 121 static inline Address at_tos_p1() {
 122   return Address(rsp,  Interpreter::expr_offset_in_bytes(1));
 123 }
 124 
 125 static inline Address at_tos_p2() {
 126   return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
 127 }
 128 
 129 // Condition conversion
 130 static Assembler::Condition j_not(TemplateTable::Condition cc) {
 131   switch (cc) {
 132   case TemplateTable::equal        : return Assembler::notEqual;
 133   case TemplateTable::not_equal    : return Assembler::equal;
 134   case TemplateTable::less         : return Assembler::greaterEqual;
 135   case TemplateTable::less_equal   : return Assembler::greater;
 136   case TemplateTable::greater      : return Assembler::lessEqual;
 137   case TemplateTable::greater_equal: return Assembler::less;
 138   }
 139   ShouldNotReachHere();
 140   return Assembler::zero;
 141 }
 142 
 143 
 144 
 145 // Miscellaneous helper routines
 146 // Store an oop (or NULL) at the address described by obj.
 147 // If val == noreg this means store a NULL
 148 
 149 
 150 static void do_oop_store(InterpreterMacroAssembler* _masm,
 151                          Address dst,
 152                          Register val,
 153                          DecoratorSet decorators = 0) {
 154   assert(val == noreg || val == rax, "parameter is just for looks");
 155   __ store_heap_oop(dst, val,
 156                     NOT_LP64(rdx) LP64_ONLY(rscratch2),
 157                     NOT_LP64(rbx) LP64_ONLY(r9),
 158                     NOT_LP64(rsi) LP64_ONLY(r8), decorators);
 159 }
 160 
 161 static void do_oop_load(InterpreterMacroAssembler* _masm,
 162                         Address src,
 163                         Register dst,
 164                         DecoratorSet decorators = 0) {
 165   __ load_heap_oop(dst, src, rdx, rbx, decorators);
 166 }
 167 
 168 Address TemplateTable::at_bcp(int offset) {
 169   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 170   return Address(rbcp, offset);
 171 }
 172 
 173 
 174 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 175                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 176                                    int byte_no) {
 177   if (!RewriteBytecodes)  return;
 178   Label L_patch_done;
 179 
 180   switch (bc) {
 181   case Bytecodes::_fast_aputfield:
 182   case Bytecodes::_fast_bputfield:
 183   case Bytecodes::_fast_zputfield:
 184   case Bytecodes::_fast_cputfield:
 185   case Bytecodes::_fast_dputfield:
 186   case Bytecodes::_fast_fputfield:
 187   case Bytecodes::_fast_iputfield:
 188   case Bytecodes::_fast_lputfield:
 189   case Bytecodes::_fast_sputfield:
 190     {
 191       // We skip bytecode quickening for putfield instructions when
 192       // the put_code written to the constant pool cache is zero.
 193       // This is required so that every execution of this instruction
 194       // calls out to InterpreterRuntime::resolve_get_put to do
 195       // additional, required work.
 196       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 197       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 198       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
 199       __ movl(bc_reg, bc);
 200       __ cmpl(temp_reg, (int) 0);
 201       __ jcc(Assembler::zero, L_patch_done);  // don't patch
 202     }
 203     break;
 204   default:
 205     assert(byte_no == -1, "sanity");
 206     // the pair bytecodes have already done the load.
 207     if (load_bc_into_bc_reg) {
 208       __ movl(bc_reg, bc);
 209     }
 210   }
 211 
 212   if (JvmtiExport::can_post_breakpoint()) {
 213     Label L_fast_patch;
 214     // if a breakpoint is present we can't rewrite the stream directly
 215     __ movzbl(temp_reg, at_bcp(0));
 216     __ cmpl(temp_reg, Bytecodes::_breakpoint);
 217     __ jcc(Assembler::notEqual, L_fast_patch);
 218     __ get_method(temp_reg);
 219     // Let breakpoint table handling rewrite to quicker bytecode
 220     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
 221 #ifndef ASSERT
 222     __ jmpb(L_patch_done);
 223 #else
 224     __ jmp(L_patch_done);
 225 #endif
 226     __ bind(L_fast_patch);
 227   }
 228 
 229 #ifdef ASSERT
 230   Label L_okay;
 231   __ load_unsigned_byte(temp_reg, at_bcp(0));
 232   __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
 233   __ jcc(Assembler::equal, L_okay);
 234   __ cmpl(temp_reg, bc_reg);
 235   __ jcc(Assembler::equal, L_okay);
 236   __ stop("patching the wrong bytecode");
 237   __ bind(L_okay);
 238 #endif
 239 
 240   // patch bytecode
 241   __ movb(at_bcp(0), bc_reg);
 242   __ bind(L_patch_done);
 243 }
 244 // Individual instructions
 245 
 246 
 247 void TemplateTable::nop() {
 248   transition(vtos, vtos);
 249   // nothing to do
 250 }
 251 
 252 void TemplateTable::shouldnotreachhere() {
 253   transition(vtos, vtos);
 254   __ stop("shouldnotreachhere bytecode");
 255 }
 256 
 257 void TemplateTable::aconst_null() {
 258   transition(vtos, atos);
 259   __ xorl(rax, rax);
 260 }
 261 
 262 void TemplateTable::iconst(int value) {
 263   transition(vtos, itos);
 264   if (value == 0) {
 265     __ xorl(rax, rax);
 266   } else {
 267     __ movl(rax, value);
 268   }
 269 }
 270 
 271 void TemplateTable::lconst(int value) {
 272   transition(vtos, ltos);
 273   if (value == 0) {
 274     __ xorl(rax, rax);
 275   } else {
 276     __ movl(rax, value);
 277   }
 278 #ifndef _LP64
 279   assert(value >= 0, "check this code");
 280   __ xorptr(rdx, rdx);
 281 #endif
 282 }
 283 
 284 
 285 
 286 void TemplateTable::fconst(int value) {
 287   transition(vtos, ftos);
 288   if (UseSSE >= 1) {
 289     static float one = 1.0f, two = 2.0f;
 290     switch (value) {
 291     case 0:
 292       __ xorps(xmm0, xmm0);
 293       break;
 294     case 1:
 295       __ movflt(xmm0, ExternalAddress((address) &one), rscratch1);
 296       break;
 297     case 2:
 298       __ movflt(xmm0, ExternalAddress((address) &two), rscratch1);
 299       break;
 300     default:
 301       ShouldNotReachHere();
 302       break;
 303     }
 304   } else {
 305 #ifdef _LP64
 306     ShouldNotReachHere();
 307 #else
 308            if (value == 0) { __ fldz();
 309     } else if (value == 1) { __ fld1();
 310     } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
 311     } else                 { ShouldNotReachHere();
 312     }
 313 #endif // _LP64
 314   }
 315 }
 316 
 317 void TemplateTable::dconst(int value) {
 318   transition(vtos, dtos);
 319   if (UseSSE >= 2) {
 320     static double one = 1.0;
 321     switch (value) {
 322     case 0:
 323       __ xorpd(xmm0, xmm0);
 324       break;
 325     case 1:
 326       __ movdbl(xmm0, ExternalAddress((address) &one), rscratch1);
 327       break;
 328     default:
 329       ShouldNotReachHere();
 330       break;
 331     }
 332   } else {
 333 #ifdef _LP64
 334     ShouldNotReachHere();
 335 #else
 336            if (value == 0) { __ fldz();
 337     } else if (value == 1) { __ fld1();
 338     } else                 { ShouldNotReachHere();
 339     }
 340 #endif
 341   }
 342 }
 343 
 344 void TemplateTable::bipush() {
 345   transition(vtos, itos);
 346   __ load_signed_byte(rax, at_bcp(1));
 347 }
 348 
 349 void TemplateTable::sipush() {
 350   transition(vtos, itos);
 351   __ load_unsigned_short(rax, at_bcp(1));
 352   __ bswapl(rax);
 353   __ sarl(rax, 16);
 354 }
 355 
 356 void TemplateTable::ldc(LdcType type) {
 357   transition(vtos, vtos);
 358   Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 359   Label call_ldc, notFloat, notClass, notInt, Done;
 360 
 361   if (is_ldc_wide(type)) {
 362     __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 363   } else {
 364     __ load_unsigned_byte(rbx, at_bcp(1));
 365   }
 366 
 367   __ get_cpool_and_tags(rcx, rax);
 368   const int base_offset = ConstantPool::header_size() * wordSize;
 369   const int tags_offset = Array<u1>::base_offset_in_bytes();
 370 
 371   // get type
 372   __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
 373 
 374   // unresolved class - get the resolved class
 375   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
 376   __ jccb(Assembler::equal, call_ldc);
 377 
 378   // unresolved class in error state - call into runtime to throw the error
 379   // from the first resolution attempt
 380   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
 381   __ jccb(Assembler::equal, call_ldc);
 382 
 383   // resolved class - need to call vm to get java mirror of the class
 384   __ cmpl(rdx, JVM_CONSTANT_Class);
 385   __ jcc(Assembler::notEqual, notClass);
 386 
 387   __ bind(call_ldc);
 388 
 389   __ movl(rarg, is_ldc_wide(type) ? 1 : 0);
 390   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);
 391 
 392   __ push(atos);
 393   __ jmp(Done);
 394 
 395   __ bind(notClass);
 396   __ cmpl(rdx, JVM_CONSTANT_Float);
 397   __ jccb(Assembler::notEqual, notFloat);
 398 
 399   // ftos
 400   __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
 401   __ push(ftos);
 402   __ jmp(Done);
 403 
 404   __ bind(notFloat);
 405   __ cmpl(rdx, JVM_CONSTANT_Integer);
 406   __ jccb(Assembler::notEqual, notInt);
 407 
 408   // itos
 409   __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
 410   __ push(itos);
 411   __ jmp(Done);
 412 
 413   // assume the tag is for condy; if not, the VM runtime will tell us
 414   __ bind(notInt);
 415   condy_helper(Done);
 416 
 417   __ bind(Done);
 418 }
 419 
 420 // Fast path for caching oop constants.
 421 void TemplateTable::fast_aldc(LdcType type) {
 422   transition(vtos, atos);
 423 
 424   Register result = rax;
 425   Register tmp = rdx;
 426   Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 427   int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);
 428 
 429   Label resolved;
 430 
 431   // We are resolved if the resolved reference cache entry contains a
 432   // non-null object (String, MethodType, etc.)
 433   assert_different_registers(result, tmp);
 434   __ get_cache_index_at_bcp(tmp, 1, index_size);
 435   __ load_resolved_reference_at_index(result, tmp);
 436   __ testptr(result, result);
 437   __ jcc(Assembler::notZero, resolved);
 438 
 439   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 440 
 441   // first time invocation - must resolve first
 442   __ movl(rarg, (int)bytecode());
 443   __ call_VM(result, entry, rarg);
 444   __ bind(resolved);
 445 
 446   { // Check for the null sentinel.
 447     // If we just called the VM, it already did the mapping for us,
 448     // but it's harmless to retry.
 449     Label notNull;
 450     ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
 451     __ movptr(tmp, null_sentinel);
 452     __ resolve_oop_handle(tmp, rscratch2);
 453     __ cmpoop(tmp, result);
 454     __ jccb(Assembler::notEqual, notNull);
 455     __ xorptr(result, result);  // NULL object reference
 456     __ bind(notNull);
 457   }
 458 
 459   if (VerifyOops) {
 460     __ verify_oop(result);
 461   }
 462 }
 463 
 464 void TemplateTable::ldc2_w() {
 465   transition(vtos, vtos);
 466   Label notDouble, notLong, Done;
 467   __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 468 
 469   __ get_cpool_and_tags(rcx, rax);
 470   const int base_offset = ConstantPool::header_size() * wordSize;
 471   const int tags_offset = Array<u1>::base_offset_in_bytes();
 472 
 473   // get type
 474   __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
 475   __ cmpl(rdx, JVM_CONSTANT_Double);
 476   __ jccb(Assembler::notEqual, notDouble);
 477 
 478   // dtos
 479   __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
 480   __ push(dtos);
 481 
 482   __ jmp(Done);
 483   __ bind(notDouble);
 484   __ cmpl(rdx, JVM_CONSTANT_Long);
 485   __ jccb(Assembler::notEqual, notLong);
 486 
 487   // ltos
 488   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
 489   NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
 490   __ push(ltos);
 491   __ jmp(Done);
 492 
 493   __ bind(notLong);
 494   condy_helper(Done);
 495 
 496   __ bind(Done);
 497 }
 498 
 499 void TemplateTable::condy_helper(Label& Done) {
 500   const Register obj = rax;
 501   const Register off = rbx;
 502   const Register flags = rcx;
 503   const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 504   __ movl(rarg, (int)bytecode());
 505   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
 506 #ifndef _LP64
 507   // borrow rdi from locals
 508   __ get_thread(rdi);
 509   __ get_vm_result_2(flags, rdi);
 510   __ restore_locals();
 511 #else
 512   __ get_vm_result_2(flags, r15_thread);
 513 #endif
 514   // VMr = obj = base address to find primitive value to push
 515   // VMr2 = flags = (tos, off) using format of CPCE::_flags
 516   __ movl(off, flags);
 517   __ andl(off, ConstantPoolCacheEntry::field_index_mask);
 518   const Address field(obj, off, Address::times_1, 0*wordSize);
 519 
 520   // What sort of thing are we loading?
 521   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
 522   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
 523 
 524   switch (bytecode()) {
 525   case Bytecodes::_ldc:
 526   case Bytecodes::_ldc_w:
 527     {
 528       // tos in (itos, ftos, stos, btos, ctos, ztos)
 529       Label notInt, notFloat, notShort, notByte, notChar, notBool;
 530       __ cmpl(flags, itos);
 531       __ jccb(Assembler::notEqual, notInt);
 532       // itos
 533       __ movl(rax, field);
 534       __ push(itos);
 535       __ jmp(Done);
 536 
 537       __ bind(notInt);
 538       __ cmpl(flags, ftos);
 539       __ jccb(Assembler::notEqual, notFloat);
 540       // ftos
 541       __ load_float(field);
 542       __ push(ftos);
 543       __ jmp(Done);
 544 
 545       __ bind(notFloat);
 546       __ cmpl(flags, stos);
 547       __ jccb(Assembler::notEqual, notShort);
 548       // stos
 549       __ load_signed_short(rax, field);
 550       __ push(stos);
 551       __ jmp(Done);
 552 
 553       __ bind(notShort);
 554       __ cmpl(flags, btos);
 555       __ jccb(Assembler::notEqual, notByte);
 556       // btos
 557       __ load_signed_byte(rax, field);
 558       __ push(btos);
 559       __ jmp(Done);
 560 
 561       __ bind(notByte);
 562       __ cmpl(flags, ctos);
 563       __ jccb(Assembler::notEqual, notChar);
 564       // ctos
 565       __ load_unsigned_short(rax, field);
 566       __ push(ctos);
 567       __ jmp(Done);
 568 
 569       __ bind(notChar);
 570       __ cmpl(flags, ztos);
 571       __ jccb(Assembler::notEqual, notBool);
 572       // ztos
 573       __ load_signed_byte(rax, field);
 574       __ push(ztos);
 575       __ jmp(Done);
 576 
 577       __ bind(notBool);
 578       break;
 579     }
 580 
 581   case Bytecodes::_ldc2_w:
 582     {
 583       Label notLong, notDouble;
 584       __ cmpl(flags, ltos);
 585       __ jccb(Assembler::notEqual, notLong);
 586       // ltos
 587       // Loading high word first because movptr clobbers rax
 588       NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
 589       __ movptr(rax, field);
 590       __ push(ltos);
 591       __ jmp(Done);
 592 
 593       __ bind(notLong);
 594       __ cmpl(flags, dtos);
 595       __ jccb(Assembler::notEqual, notDouble);
 596       // dtos
 597       __ load_double(field);
 598       __ push(dtos);
 599       __ jmp(Done);
 600 
 601       __ bind(notDouble);
 602       break;
 603     }
 604 
 605   default:
 606     ShouldNotReachHere();
 607   }
 608 
 609   __ stop("bad ldc/condy");
 610 }
 611 
 612 void TemplateTable::locals_index(Register reg, int offset) {
 613   __ load_unsigned_byte(reg, at_bcp(offset));
 614   __ negptr(reg);
 615 }
 616 
 617 void TemplateTable::iload() {
 618   iload_internal();
 619 }
 620 
 621 void TemplateTable::nofast_iload() {
 622   iload_internal(may_not_rewrite);
 623 }
 624 
 625 void TemplateTable::iload_internal(RewriteControl rc) {
 626   transition(vtos, itos);
 627   if (RewriteFrequentPairs && rc == may_rewrite) {
 628     Label rewrite, done;
 629     const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
 630     LP64_ONLY(assert(rbx != bc, "register damaged"));
 631 
 632     // get next byte
 633     __ load_unsigned_byte(rbx,
 634                           at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 635     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
 636     // last two iloads in a pair.  Comparing against fast_iload means that
 637     // the next bytecode is neither an iload or a caload, and therefore
 638     // an iload pair.
 639     __ cmpl(rbx, Bytecodes::_iload);
 640     __ jcc(Assembler::equal, done);
 641 
 642     __ cmpl(rbx, Bytecodes::_fast_iload);
 643     __ movl(bc, Bytecodes::_fast_iload2);
 644 
 645     __ jccb(Assembler::equal, rewrite);
 646 
 647     // if _caload, rewrite to fast_icaload
 648     __ cmpl(rbx, Bytecodes::_caload);
 649     __ movl(bc, Bytecodes::_fast_icaload);
 650     __ jccb(Assembler::equal, rewrite);
 651 
 652     // rewrite so iload doesn't check again.
 653     __ movl(bc, Bytecodes::_fast_iload);
 654 
 655     // rewrite
 656     // bc: fast bytecode
 657     __ bind(rewrite);
 658     patch_bytecode(Bytecodes::_iload, bc, rbx, false);
 659     __ bind(done);
 660   }
 661 
 662   // Get the local value into tos
 663   locals_index(rbx);
 664   __ movl(rax, iaddress(rbx));
 665 }
 666 
 667 void TemplateTable::fast_iload2() {
 668   transition(vtos, itos);
 669   locals_index(rbx);
 670   __ movl(rax, iaddress(rbx));
 671   __ push(itos);
 672   locals_index(rbx, 3);
 673   __ movl(rax, iaddress(rbx));
 674 }
 675 
 676 void TemplateTable::fast_iload() {
 677   transition(vtos, itos);
 678   locals_index(rbx);
 679   __ movl(rax, iaddress(rbx));
 680 }
 681 
 682 void TemplateTable::lload() {
 683   transition(vtos, ltos);
 684   locals_index(rbx);
 685   __ movptr(rax, laddress(rbx));
 686   NOT_LP64(__ movl(rdx, haddress(rbx)));
 687 }
 688 
 689 void TemplateTable::fload() {
 690   transition(vtos, ftos);
 691   locals_index(rbx);
 692   __ load_float(faddress(rbx));
 693 }
 694 
 695 void TemplateTable::dload() {
 696   transition(vtos, dtos);
 697   locals_index(rbx);
 698   __ load_double(daddress(rbx));
 699 }
 700 
 701 void TemplateTable::aload() {
 702   transition(vtos, atos);
 703   locals_index(rbx);
 704   __ movptr(rax, aaddress(rbx));
 705 }
 706 
 707 void TemplateTable::locals_index_wide(Register reg) {
 708   __ load_unsigned_short(reg, at_bcp(2));
 709   __ bswapl(reg);
 710   __ shrl(reg, 16);
 711   __ negptr(reg);
 712 }
 713 
 714 void TemplateTable::wide_iload() {
 715   transition(vtos, itos);
 716   locals_index_wide(rbx);
 717   __ movl(rax, iaddress(rbx));
 718 }
 719 
 720 void TemplateTable::wide_lload() {
 721   transition(vtos, ltos);
 722   locals_index_wide(rbx);
 723   __ movptr(rax, laddress(rbx));
 724   NOT_LP64(__ movl(rdx, haddress(rbx)));
 725 }
 726 
 727 void TemplateTable::wide_fload() {
 728   transition(vtos, ftos);
 729   locals_index_wide(rbx);
 730   __ load_float(faddress(rbx));
 731 }
 732 
 733 void TemplateTable::wide_dload() {
 734   transition(vtos, dtos);
 735   locals_index_wide(rbx);
 736   __ load_double(daddress(rbx));
 737 }
 738 
 739 void TemplateTable::wide_aload() {
 740   transition(vtos, atos);
 741   locals_index_wide(rbx);
 742   __ movptr(rax, aaddress(rbx));
 743 }
 744 
 745 void TemplateTable::index_check(Register array, Register index) {
 746   // Pop ptr into array
 747   __ pop_ptr(array);
 748   index_check_without_pop(array, index);
 749 }
 750 
 751 void TemplateTable::index_check_without_pop(Register array, Register index) {
 752   // destroys rbx
 753   // check array
 754   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
 755   // sign extend index for use by indexed load
 756   __ movl2ptr(index, index);
 757   // check index
 758   __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
 759   if (index != rbx) {
 760     // ??? convention: move aberrant index into rbx for exception message
 761     assert(rbx != array, "different registers");
 762     __ movl(rbx, index);
 763   }
 764   Label skip;
 765   __ jccb(Assembler::below, skip);
 766   // Pass array to create more detailed exceptions.
 767   __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
 768   __ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
 769   __ bind(skip);
 770 }
 771 
 772 void TemplateTable::iaload() {
 773   transition(itos, itos);
 774   // rax: index
 775   // rdx: array
 776   index_check(rdx, rax); // kills rbx
 777   __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
 778                     Address(rdx, rax, Address::times_4,
 779                             arrayOopDesc::base_offset_in_bytes(T_INT)),
 780                     noreg, noreg);
 781 }
 782 
 783 void TemplateTable::laload() {
 784   transition(itos, ltos);
 785   // rax: index
 786   // rdx: array
 787   index_check(rdx, rax); // kills rbx
 788   NOT_LP64(__ mov(rbx, rax));
 789   // rbx,: index
 790   __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
 791                     Address(rdx, rbx, Address::times_8,
 792                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
 793                     noreg, noreg);
 794 }
 795 
 796 
 797 
 798 void TemplateTable::faload() {
 799   transition(itos, ftos);
 800   // rax: index
 801   // rdx: array
 802   index_check(rdx, rax); // kills rbx
 803   __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
 804                     Address(rdx, rax,
 805                             Address::times_4,
 806                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
 807                     noreg, noreg);
 808 }
 809 
 810 void TemplateTable::daload() {
 811   transition(itos, dtos);
 812   // rax: index
 813   // rdx: array
 814   index_check(rdx, rax); // kills rbx
 815   __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
 816                     Address(rdx, rax,
 817                             Address::times_8,
 818                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
 819                     noreg, noreg);
 820 }
 821 
 822 void TemplateTable::aaload() {
 823   transition(itos, atos);
 824   // rax: index
 825   // rdx: array
 826   index_check(rdx, rax); // kills rbx
 827   do_oop_load(_masm,
 828               Address(rdx, rax,
 829                       UseCompressedOops ? Address::times_4 : Address::times_ptr,
 830                       arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
 831               rax,
 832               IS_ARRAY);
 833 }
 834 
 835 void TemplateTable::baload() {
 836   transition(itos, itos);
 837   // rax: index
 838   // rdx: array
 839   index_check(rdx, rax); // kills rbx
 840   __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
 841                     Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
 842                     noreg, noreg);
 843 }
 844 
 845 void TemplateTable::caload() {
 846   transition(itos, itos);
 847   // rax: index
 848   // rdx: array
 849   index_check(rdx, rax); // kills rbx
 850   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
 851                     Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
 852                     noreg, noreg);
 853 }
 854 
 855 // iload followed by caload frequent pair
 856 void TemplateTable::fast_icaload() {
 857   transition(vtos, itos);
 858   // load index out of locals
 859   locals_index(rbx);
 860   __ movl(rax, iaddress(rbx));
 861 
 862   // rax: index
 863   // rdx: array
 864   index_check(rdx, rax); // kills rbx
 865   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
 866                     Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
 867                     noreg, noreg);
 868 }
 869 
 870 
 871 void TemplateTable::saload() {
 872   transition(itos, itos);
 873   // rax: index
 874   // rdx: array
 875   index_check(rdx, rax); // kills rbx
 876   __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
 877                     Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
 878                     noreg, noreg);
 879 }
 880 
 881 void TemplateTable::iload(int n) {
 882   transition(vtos, itos);
 883   __ movl(rax, iaddress(n));
 884 }
 885 
 886 void TemplateTable::lload(int n) {
 887   transition(vtos, ltos);
 888   __ movptr(rax, laddress(n));
 889   NOT_LP64(__ movptr(rdx, haddress(n)));
 890 }
 891 
 892 void TemplateTable::fload(int n) {
 893   transition(vtos, ftos);
 894   __ load_float(faddress(n));
 895 }
 896 
 897 void TemplateTable::dload(int n) {
 898   transition(vtos, dtos);
 899   __ load_double(daddress(n));
 900 }
 901 
 902 void TemplateTable::aload(int n) {
 903   transition(vtos, atos);
 904   __ movptr(rax, aaddress(n));
 905 }
 906 
 907 void TemplateTable::aload_0() {
 908   aload_0_internal();
 909 }
 910 
 911 void TemplateTable::nofast_aload_0() {
 912   aload_0_internal(may_not_rewrite);
 913 }
 914 
 915 void TemplateTable::aload_0_internal(RewriteControl rc) {
 916   transition(vtos, atos);
 917   // According to bytecode histograms, the pairs:
 918   //
 919   // _aload_0, _fast_igetfield
 920   // _aload_0, _fast_agetfield
 921   // _aload_0, _fast_fgetfield
 922   //
 923   // occur frequently. If RewriteFrequentPairs is set, the (slow)
 924   // _aload_0 bytecode checks if the next bytecode is either
 925   // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
 926   // rewrites the current bytecode into a pair bytecode; otherwise it
 927   // rewrites the current bytecode into _fast_aload_0 that doesn't do
 928   // the pair check anymore.
 929   //
 930   // Note: If the next bytecode is _getfield, the rewrite must be
 931   //       delayed, otherwise we may miss an opportunity for a pair.
 932   //
 933   // Also rewrite frequent pairs
 934   //   aload_0, aload_1
 935   //   aload_0, iload_1
 936   // These bytecodes with a small amount of code are most profitable
 937   // to rewrite
 938   if (RewriteFrequentPairs && rc == may_rewrite) {
 939     Label rewrite, done;
 940 
 941     const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
 942     LP64_ONLY(assert(rbx != bc, "register damaged"));
 943 
 944     // get next byte
 945     __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 946 
 947     // if _getfield then wait with rewrite
 948     __ cmpl(rbx, Bytecodes::_getfield);
 949     __ jcc(Assembler::equal, done);
 950 
 951     // if _igetfield then rewrite to _fast_iaccess_0
 952     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 953     __ cmpl(rbx, Bytecodes::_fast_igetfield);
 954     __ movl(bc, Bytecodes::_fast_iaccess_0);
 955     __ jccb(Assembler::equal, rewrite);
 956 
 957     // if _agetfield then rewrite to _fast_aaccess_0
 958     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 959     __ cmpl(rbx, Bytecodes::_fast_agetfield);
 960     __ movl(bc, Bytecodes::_fast_aaccess_0);
 961     __ jccb(Assembler::equal, rewrite);
 962 
 963     // if _fgetfield then rewrite to _fast_faccess_0
 964     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 965     __ cmpl(rbx, Bytecodes::_fast_fgetfield);
 966     __ movl(bc, Bytecodes::_fast_faccess_0);
 967     __ jccb(Assembler::equal, rewrite);
 968 
 969     // else rewrite to _fast_aload0
 970     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
 971     __ movl(bc, Bytecodes::_fast_aload_0);
 972 
 973     // rewrite
 974     // bc: fast bytecode
 975     __ bind(rewrite);
 976     patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
 977 
 978     __ bind(done);
 979   }
 980 
 981   // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
 982   aload(0);
 983 }
 984 
 985 void TemplateTable::istore() {
 986   transition(itos, vtos);
 987   locals_index(rbx);
 988   __ movl(iaddress(rbx), rax);
 989 }
 990 
 991 
 992 void TemplateTable::lstore() {
 993   transition(ltos, vtos);
 994   locals_index(rbx);
 995   __ movptr(laddress(rbx), rax);
 996   NOT_LP64(__ movptr(haddress(rbx), rdx));
 997 }
 998 
 999 void TemplateTable::fstore() {
1000   transition(ftos, vtos);
1001   locals_index(rbx);
1002   __ store_float(faddress(rbx));
1003 }
1004 
1005 void TemplateTable::dstore() {
1006   transition(dtos, vtos);
1007   locals_index(rbx);
1008   __ store_double(daddress(rbx));
1009 }
1010 
1011 void TemplateTable::astore() {
1012   transition(vtos, vtos);
1013   __ pop_ptr(rax);
1014   locals_index(rbx);
1015   __ movptr(aaddress(rbx), rax);
1016 }
1017 
1018 void TemplateTable::wide_istore() {
1019   transition(vtos, vtos);
1020   __ pop_i();
1021   locals_index_wide(rbx);
1022   __ movl(iaddress(rbx), rax);
1023 }
1024 
1025 void TemplateTable::wide_lstore() {
1026   transition(vtos, vtos);
1027   NOT_LP64(__ pop_l(rax, rdx));
1028   LP64_ONLY(__ pop_l());
1029   locals_index_wide(rbx);
1030   __ movptr(laddress(rbx), rax);
1031   NOT_LP64(__ movl(haddress(rbx), rdx));
1032 }
1033 
1034 void TemplateTable::wide_fstore() {
1035 #ifdef _LP64
1036   transition(vtos, vtos);
1037   __ pop_f(xmm0);
1038   locals_index_wide(rbx);
1039   __ movflt(faddress(rbx), xmm0);
1040 #else
1041   wide_istore();
1042 #endif
1043 }
1044 
1045 void TemplateTable::wide_dstore() {
1046 #ifdef _LP64
1047   transition(vtos, vtos);
1048   __ pop_d(xmm0);
1049   locals_index_wide(rbx);
1050   __ movdbl(daddress(rbx), xmm0);
1051 #else
1052   wide_lstore();
1053 #endif
1054 }
1055 
1056 void TemplateTable::wide_astore() {
1057   transition(vtos, vtos);
1058   __ pop_ptr(rax);
1059   locals_index_wide(rbx);
1060   __ movptr(aaddress(rbx), rax);
1061 }
1062 
1063 void TemplateTable::iastore() {
1064   transition(itos, vtos);
1065   __ pop_i(rbx);
1066   // rax: value
1067   // rbx: index
1068   // rdx: array
1069   index_check(rdx, rbx); // prefer index in rbx
1070   __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
1071                      Address(rdx, rbx, Address::times_4,
1072                              arrayOopDesc::base_offset_in_bytes(T_INT)),
1073                      rax, noreg, noreg, noreg);
1074 }
1075 
1076 void TemplateTable::lastore() {
1077   transition(ltos, vtos);
1078   __ pop_i(rbx);
1079   // rax,: low(value)
1080   // rcx: array
1081   // rdx: high(value)
1082   index_check(rcx, rbx);  // prefer index in rbx,
1083   // rbx,: index
1084   __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
1085                      Address(rcx, rbx, Address::times_8,
1086                              arrayOopDesc::base_offset_in_bytes(T_LONG)),
1087                      noreg /* ltos */, noreg, noreg, noreg);
1088 }
1089 
1090 
1091 void TemplateTable::fastore() {
1092   transition(ftos, vtos);
1093   __ pop_i(rbx);
1094   // value is in UseSSE >= 1 ? xmm0 : ST(0)
1095   // rbx:  index
1096   // rdx:  array
1097   index_check(rdx, rbx); // prefer index in rbx
1098   __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
1099                      Address(rdx, rbx, Address::times_4,
1100                              arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
1101                      noreg /* ftos */, noreg, noreg, noreg);
1102 }
1103 
1104 void TemplateTable::dastore() {
1105   transition(dtos, vtos);
1106   __ pop_i(rbx);
1107   // value is in UseSSE >= 2 ? xmm0 : ST(0)
1108   // rbx:  index
1109   // rdx:  array
1110   index_check(rdx, rbx); // prefer index in rbx
1111   __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
1112                      Address(rdx, rbx, Address::times_8,
1113                              arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
1114                      noreg /* dtos */, noreg, noreg, noreg);
1115 }
1116 
1117 void TemplateTable::aastore() {
1118   Label is_null, ok_is_subtype, done;
1119   transition(vtos, vtos);
1120   // stack: ..., array, index, value
1121   __ movptr(rax, at_tos());    // value
1122   __ movl(rcx, at_tos_p1()); // index
1123   __ movptr(rdx, at_tos_p2()); // array
1124 
1125   Address element_address(rdx, rcx,
1126                           UseCompressedOops? Address::times_4 : Address::times_ptr,
1127                           arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1128 
1129   index_check_without_pop(rdx, rcx);     // kills rbx
1130   __ testptr(rax, rax);
1131   __ jcc(Assembler::zero, is_null);
1132 
1133   // Move subklass into rbx
1134   __ load_klass(rbx, rax, rscratch1);
1135   // Move superklass into rax
1136   __ load_klass(rax, rdx, rscratch1);
1137   __ movptr(rax, Address(rax,
1138                          ObjArrayKlass::element_klass_offset()));
1139 
1140   // Generate subtype check.  Blows rcx, rdi
1141   // Superklass in rax.  Subklass in rbx.
1142   __ gen_subtype_check(rbx, ok_is_subtype);
1143 
1144   // Come here on failure
1145   // object is at TOS
1146   __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
1147 
1148   // Come here on success
1149   __ bind(ok_is_subtype);
1150 
1151   // Get the value we will store
1152   __ movptr(rax, at_tos());
1153   __ movl(rcx, at_tos_p1()); // index
1154   // Now store using the appropriate barrier
1155   do_oop_store(_masm, element_address, rax, IS_ARRAY);
1156   __ jmp(done);
1157 
1158   // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
1159   __ bind(is_null);
1160   __ profile_null_seen(rbx);
1161 
1162   // Store a NULL
1163   do_oop_store(_masm, element_address, noreg, IS_ARRAY);
1164 
1165   // Pop stack arguments
1166   __ bind(done);
1167   __ addptr(rsp, 3 * Interpreter::stackElementSize);
1168 }
1169 
1170 void TemplateTable::bastore() {
1171   transition(itos, vtos);
1172   __ pop_i(rbx);
1173   // rax: value
1174   // rbx: index
1175   // rdx: array
1176   index_check(rdx, rbx); // prefer index in rbx
1177   // Need to check whether array is boolean or byte
1178   // since both types share the bastore bytecode.
1179   __ load_klass(rcx, rdx, rscratch1);
1180   __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
1181   int diffbit = Klass::layout_helper_boolean_diffbit();
1182   __ testl(rcx, diffbit);
1183   Label L_skip;
1184   __ jccb(Assembler::zero, L_skip);
1185   __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
1186   __ bind(L_skip);
1187   __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
1188                      Address(rdx, rbx,Address::times_1,
1189                              arrayOopDesc::base_offset_in_bytes(T_BYTE)),
1190                      rax, noreg, noreg, noreg);
1191 }
1192 
1193 void TemplateTable::castore() {
1194   transition(itos, vtos);
1195   __ pop_i(rbx);
1196   // rax: value
1197   // rbx: index
1198   // rdx: array
1199   index_check(rdx, rbx);  // prefer index in rbx
1200   __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
1201                      Address(rdx, rbx, Address::times_2,
1202                              arrayOopDesc::base_offset_in_bytes(T_CHAR)),
1203                      rax, noreg, noreg, noreg);
1204 }
1205 
1206 
1207 void TemplateTable::sastore() {
1208   castore();
1209 }
1210 
1211 void TemplateTable::istore(int n) {
1212   transition(itos, vtos);
1213   __ movl(iaddress(n), rax);
1214 }
1215 
1216 void TemplateTable::lstore(int n) {
1217   transition(ltos, vtos);
1218   __ movptr(laddress(n), rax);
1219   NOT_LP64(__ movptr(haddress(n), rdx));
1220 }
1221 
1222 void TemplateTable::fstore(int n) {
1223   transition(ftos, vtos);
1224   __ store_float(faddress(n));
1225 }
1226 
1227 void TemplateTable::dstore(int n) {
1228   transition(dtos, vtos);
1229   __ store_double(daddress(n));
1230 }
1231 
1232 
1233 void TemplateTable::astore(int n) {
1234   transition(vtos, vtos);
1235   __ pop_ptr(rax);
1236   __ movptr(aaddress(n), rax);
1237 }
1238 
1239 void TemplateTable::pop() {
1240   transition(vtos, vtos);
1241   __ addptr(rsp, Interpreter::stackElementSize);
1242 }
1243 
1244 void TemplateTable::pop2() {
1245   transition(vtos, vtos);
1246   __ addptr(rsp, 2 * Interpreter::stackElementSize);
1247 }
1248 
1249 
1250 void TemplateTable::dup() {
1251   transition(vtos, vtos);
1252   __ load_ptr(0, rax);
1253   __ push_ptr(rax);
1254   // stack: ..., a, a
1255 }
1256 
1257 void TemplateTable::dup_x1() {
1258   transition(vtos, vtos);
1259   // stack: ..., a, b
1260   __ load_ptr( 0, rax);  // load b
1261   __ load_ptr( 1, rcx);  // load a
1262   __ store_ptr(1, rax);  // store b
1263   __ store_ptr(0, rcx);  // store a
1264   __ push_ptr(rax);      // push b
1265   // stack: ..., b, a, b
1266 }
1267 
1268 void TemplateTable::dup_x2() {
1269   transition(vtos, vtos);
1270   // stack: ..., a, b, c
1271   __ load_ptr( 0, rax);  // load c
1272   __ load_ptr( 2, rcx);  // load a
1273   __ store_ptr(2, rax);  // store c in a
1274   __ push_ptr(rax);      // push c
1275   // stack: ..., c, b, c, c
1276   __ load_ptr( 2, rax);  // load b
1277   __ store_ptr(2, rcx);  // store a in b
1278   // stack: ..., c, a, c, c
1279   __ store_ptr(1, rax);  // store b in c
1280   // stack: ..., c, a, b, c
1281 }
1282 
1283 void TemplateTable::dup2() {
1284   transition(vtos, vtos);
1285   // stack: ..., a, b
1286   __ load_ptr(1, rax);  // load a
1287   __ push_ptr(rax);     // push a
1288   __ load_ptr(1, rax);  // load b
1289   __ push_ptr(rax);     // push b
1290   // stack: ..., a, b, a, b
1291 }
1292 
1293 
1294 void TemplateTable::dup2_x1() {
1295   transition(vtos, vtos);
1296   // stack: ..., a, b, c
1297   __ load_ptr( 0, rcx);  // load c
1298   __ load_ptr( 1, rax);  // load b
1299   __ push_ptr(rax);      // push b
1300   __ push_ptr(rcx);      // push c
1301   // stack: ..., a, b, c, b, c
1302   __ store_ptr(3, rcx);  // store c in b
1303   // stack: ..., a, c, c, b, c
1304   __ load_ptr( 4, rcx);  // load a
1305   __ store_ptr(2, rcx);  // store a in 2nd c
1306   // stack: ..., a, c, a, b, c
1307   __ store_ptr(4, rax);  // store b in a
1308   // stack: ..., b, c, a, b, c
1309 }
1310 
1311 void TemplateTable::dup2_x2() {
1312   transition(vtos, vtos);
1313   // stack: ..., a, b, c, d
1314   __ load_ptr( 0, rcx);  // load d
1315   __ load_ptr( 1, rax);  // load c
1316   __ push_ptr(rax);      // push c
1317   __ push_ptr(rcx);      // push d
1318   // stack: ..., a, b, c, d, c, d
1319   __ load_ptr( 4, rax);  // load b
1320   __ store_ptr(2, rax);  // store b in d
1321   __ store_ptr(4, rcx);  // store d in b
1322   // stack: ..., a, d, c, b, c, d
1323   __ load_ptr( 5, rcx);  // load a
1324   __ load_ptr( 3, rax);  // load c
1325   __ store_ptr(3, rcx);  // store a in c
1326   __ store_ptr(5, rax);  // store c in a
1327   // stack: ..., c, d, a, b, c, d
1328 }
1329 
1330 void TemplateTable::swap() {
1331   transition(vtos, vtos);
1332   // stack: ..., a, b
1333   __ load_ptr( 1, rcx);  // load a
1334   __ load_ptr( 0, rax);  // load b
1335   __ store_ptr(0, rcx);  // store a in b
1336   __ store_ptr(1, rax);  // store b in a
1337   // stack: ..., b, a
1338 }
1339 
1340 void TemplateTable::iop2(Operation op) {
1341   transition(itos, itos);
1342   switch (op) {
1343   case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
1344   case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1345   case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
1346   case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
1347   case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
1348   case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
1349   case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
1350   case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
1351   case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
1352   default   : ShouldNotReachHere();
1353   }
1354 }
1355 
1356 void TemplateTable::lop2(Operation op) {
1357   transition(ltos, ltos);
1358 #ifdef _LP64
1359   switch (op) {
1360   case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
1361   case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
1362   case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
1363   case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
1364   case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
1365   default   : ShouldNotReachHere();
1366   }
1367 #else
1368   __ pop_l(rbx, rcx);
1369   switch (op) {
1370     case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1371     case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1372                 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1373     case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1374     case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1375     case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1376     default   : ShouldNotReachHere();
1377   }
1378 #endif
1379 }
1380 
1381 void TemplateTable::idiv() {
1382   transition(itos, itos);
1383   __ movl(rcx, rax);
1384   __ pop_i(rax);
1385   // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1386   //       they are not equal, one could do a normal division (no correction
1387   //       needed), which may speed up this implementation for the common case.
1388   //       (see also JVM spec., p.243 & p.271)
1389   __ corrected_idivl(rcx);
1390 }
1391 
1392 void TemplateTable::irem() {
1393   transition(itos, itos);
1394   __ movl(rcx, rax);
1395   __ pop_i(rax);
1396   // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1397   //       they are not equal, one could do a normal division (no correction
1398   //       needed), which may speed up this implementation for the common case.
1399   //       (see also JVM spec., p.243 & p.271)
1400   __ corrected_idivl(rcx);
1401   __ movl(rax, rdx);
1402 }
1403 
1404 void TemplateTable::lmul() {
1405   transition(ltos, ltos);
1406 #ifdef _LP64
1407   __ pop_l(rdx);
1408   __ imulq(rax, rdx);
1409 #else
1410   __ pop_l(rbx, rcx);
1411   __ push(rcx); __ push(rbx);
1412   __ push(rdx); __ push(rax);
1413   __ lmul(2 * wordSize, 0);
1414   __ addptr(rsp, 4 * wordSize);  // take off temporaries
1415 #endif
1416 }
1417 
1418 void TemplateTable::ldiv() {
1419   transition(ltos, ltos);
1420 #ifdef _LP64
1421   __ mov(rcx, rax);
1422   __ pop_l(rax);
1423   // generate explicit div0 check
1424   __ testq(rcx, rcx);
1425   __ jump_cc(Assembler::zero,
1426              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1427   // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1428   //       they are not equal, one could do a normal division (no correction
1429   //       needed), which may speed up this implementation for the common case.
1430   //       (see also JVM spec., p.243 & p.271)
1431   __ corrected_idivq(rcx); // kills rbx
1432 #else
1433   __ pop_l(rbx, rcx);
1434   __ push(rcx); __ push(rbx);
1435   __ push(rdx); __ push(rax);
1436   // check if y = 0
1437   __ orl(rax, rdx);
1438   __ jump_cc(Assembler::zero,
1439              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1440   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1441   __ addptr(rsp, 4 * wordSize);  // take off temporaries
1442 #endif
1443 }
1444 
1445 void TemplateTable::lrem() {
1446   transition(ltos, ltos);
1447 #ifdef _LP64
1448   __ mov(rcx, rax);
1449   __ pop_l(rax);
1450   __ testq(rcx, rcx);
1451   __ jump_cc(Assembler::zero,
1452              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1453   // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1454   //       they are not equal, one could do a normal division (no correction
1455   //       needed), which may speed up this implementation for the common case.
1456   //       (see also JVM spec., p.243 & p.271)
1457   __ corrected_idivq(rcx); // kills rbx
1458   __ mov(rax, rdx);
1459 #else
1460   __ pop_l(rbx, rcx);
1461   __ push(rcx); __ push(rbx);
1462   __ push(rdx); __ push(rax);
1463   // check if y = 0
1464   __ orl(rax, rdx);
1465   __ jump_cc(Assembler::zero,
1466              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1467   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1468   __ addptr(rsp, 4 * wordSize);
1469 #endif
1470 }
1471 
1472 void TemplateTable::lshl() {
1473   transition(itos, ltos);
1474   __ movl(rcx, rax);                             // get shift count
1475   #ifdef _LP64
1476   __ pop_l(rax);                                 // get shift value
1477   __ shlq(rax);
1478 #else
1479   __ pop_l(rax, rdx);                            // get shift value
1480   __ lshl(rdx, rax);
1481 #endif
1482 }
1483 
1484 void TemplateTable::lshr() {
1485 #ifdef _LP64
1486   transition(itos, ltos);
1487   __ movl(rcx, rax);                             // get shift count
1488   __ pop_l(rax);                                 // get shift value
1489   __ sarq(rax);
1490 #else
1491   transition(itos, ltos);
1492   __ mov(rcx, rax);                              // get shift count
1493   __ pop_l(rax, rdx);                            // get shift value
1494   __ lshr(rdx, rax, true);
1495 #endif
1496 }
1497 
1498 void TemplateTable::lushr() {
1499   transition(itos, ltos);
1500 #ifdef _LP64
1501   __ movl(rcx, rax);                             // get shift count
1502   __ pop_l(rax);                                 // get shift value
1503   __ shrq(rax);
1504 #else
1505   __ mov(rcx, rax);                              // get shift count
1506   __ pop_l(rax, rdx);                            // get shift value
1507   __ lshr(rdx, rax);
1508 #endif
1509 }
1510 
1511 void TemplateTable::fop2(Operation op) {
1512   transition(ftos, ftos);
1513 
1514   if (UseSSE >= 1) {
1515     switch (op) {
1516     case add:
1517       __ addss(xmm0, at_rsp());
1518       __ addptr(rsp, Interpreter::stackElementSize);
1519       break;
1520     case sub:
1521       __ movflt(xmm1, xmm0);
1522       __ pop_f(xmm0);
1523       __ subss(xmm0, xmm1);
1524       break;
1525     case mul:
1526       __ mulss(xmm0, at_rsp());
1527       __ addptr(rsp, Interpreter::stackElementSize);
1528       break;
1529     case div:
1530       __ movflt(xmm1, xmm0);
1531       __ pop_f(xmm0);
1532       __ divss(xmm0, xmm1);
1533       break;
1534     case rem:
1535       // On x86_64 platforms the SharedRuntime::frem method is called to perform the
1536       // modulo operation. The frem method calls the function
1537       // double fmod(double x, double y) in math.h. The documentation of fmod states:
1538       // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
1539       // (signalling or quiet) is returned.
1540       //
1541       // On x86_32 platforms the FPU is used to perform the modulo operation. The
1542       // reason is that on 32-bit Windows the sign of modulo operations diverges from
1543       // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f (and not -0.0f).
1544       // The fprem instruction used on x86_32 is functionally equivalent to
1545       // SharedRuntime::frem in that it returns a NaN.
1546 #ifdef _LP64
1547       __ movflt(xmm1, xmm0);
1548       __ pop_f(xmm0);
1549       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
1550 #else // !_LP64
1551       __ push_f(xmm0);
1552       __ pop_f();
1553       __ fld_s(at_rsp());
1554       __ fremr(rax);
1555       __ f2ieee();
1556       __ pop(rax);  // pop second operand off the stack
1557       __ push_f();
1558       __ pop_f(xmm0);
1559 #endif // _LP64
1560       break;
1561     default:
1562       ShouldNotReachHere();
1563       break;
1564     }
1565   } else {
1566 #ifdef _LP64
1567     ShouldNotReachHere();
1568 #else // !_LP64
1569     switch (op) {
1570     case add: __ fadd_s (at_rsp());                break;
1571     case sub: __ fsubr_s(at_rsp());                break;
1572     case mul: __ fmul_s (at_rsp());                break;
1573     case div: __ fdivr_s(at_rsp());                break;
1574     case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
1575     default : ShouldNotReachHere();
1576     }
1577     __ f2ieee();
1578     __ pop(rax);  // pop second operand off the stack
1579 #endif // _LP64
1580   }
1581 }
1582 
1583 void TemplateTable::dop2(Operation op) {
1584   transition(dtos, dtos);
1585   if (UseSSE >= 2) {
1586     switch (op) {
1587     case add:
1588       __ addsd(xmm0, at_rsp());
1589       __ addptr(rsp, 2 * Interpreter::stackElementSize);
1590       break;
1591     case sub:
1592       __ movdbl(xmm1, xmm0);
1593       __ pop_d(xmm0);
1594       __ subsd(xmm0, xmm1);
1595       break;
1596     case mul:
1597       __ mulsd(xmm0, at_rsp());
1598       __ addptr(rsp, 2 * Interpreter::stackElementSize);
1599       break;
1600     case div:
1601       __ movdbl(xmm1, xmm0);
1602       __ pop_d(xmm0);
1603       __ divsd(xmm0, xmm1);
1604       break;
1605     case rem:
1606       // Similar to fop2(), the modulo operation is performed using the
1607       // SharedRuntime::drem method (on x86_64 platforms) or using the
1608       // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
1609 #ifdef _LP64
1610       __ movdbl(xmm1, xmm0);
1611       __ pop_d(xmm0);
1612       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
1613 #else // !_LP64
1614       __ push_d(xmm0);
1615       __ pop_d();
1616       __ fld_d(at_rsp());
1617       __ fremr(rax);
1618       __ d2ieee();
1619       __ pop(rax);
1620       __ pop(rdx);
1621       __ push_d();
1622       __ pop_d(xmm0);
1623 #endif // _LP64
1624       break;
1625     default:
1626       ShouldNotReachHere();
1627       break;
1628     }
1629   } else {
1630 #ifdef _LP64
1631     ShouldNotReachHere();
1632 #else // !_LP64
1633     switch (op) {
1634     case add: __ fadd_d (at_rsp());                break;
1635     case sub: __ fsubr_d(at_rsp());                break;
1636     case mul: {
1637       // strict semantics
1638       __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
1639       __ fmulp();
1640       __ fmul_d (at_rsp());
1641       __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
1642       __ fmulp();
1643       break;
1644     }
1645     case div: {
1646       // strict semantics
1647       __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
1648       __ fmul_d (at_rsp());
1649       __ fdivrp();
1650       __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
1651       __ fmulp();
1652       break;
1653     }
1654     case rem: __ fld_d  (at_rsp()); __ fremr(rax); break;
1655     default : ShouldNotReachHere();
1656     }
1657     __ d2ieee();
1658     // Pop double precision number from rsp.
1659     __ pop(rax);
1660     __ pop(rdx);
1661 #endif // _LP64
1662   }
1663 }
1664 
1665 void TemplateTable::ineg() {
1666   transition(itos, itos);
1667   __ negl(rax);
1668 }
1669 
1670 void TemplateTable::lneg() {
1671   transition(ltos, ltos);
1672   LP64_ONLY(__ negq(rax));
1673   NOT_LP64(__ lneg(rdx, rax));
1674 }
1675 
1676 // Note: 'double' and 'long long' have 32-bits alignment on x86.
1677 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1678   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
1679   // of 128-bits operands for SSE instructions.
1680   jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1681   // Store the value to a 128-bits operand.
1682   operand[0] = lo;
1683   operand[1] = hi;
1684   return operand;
1685 }
1686 
1687 // Buffer for 128-bits masks used by SSE instructions.
1688 static jlong float_signflip_pool[2*2];
1689 static jlong double_signflip_pool[2*2];
1690 
1691 void TemplateTable::fneg() {
1692   transition(ftos, ftos);
1693   if (UseSSE >= 1) {
1694     static jlong *float_signflip  = double_quadword(&float_signflip_pool[1],  CONST64(0x8000000080000000),  CONST64(0x8000000080000000));
1695     __ xorps(xmm0, ExternalAddress((address) float_signflip), rscratch1);
1696   } else {
1697     LP64_ONLY(ShouldNotReachHere());
1698     NOT_LP64(__ fchs());
1699   }
1700 }
1701 
1702 void TemplateTable::dneg() {
1703   transition(dtos, dtos);
1704   if (UseSSE >= 2) {
1705     static jlong *double_signflip =
1706       double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
1707     __ xorpd(xmm0, ExternalAddress((address) double_signflip), rscratch1);
1708   } else {
1709 #ifdef _LP64
1710     ShouldNotReachHere();
1711 #else
1712     __ fchs();
1713 #endif
1714   }
1715 }
1716 
1717 void TemplateTable::iinc() {
1718   transition(vtos, vtos);
1719   __ load_signed_byte(rdx, at_bcp(2)); // get constant
1720   locals_index(rbx);
1721   __ addl(iaddress(rbx), rdx);
1722 }
1723 
1724 void TemplateTable::wide_iinc() {
1725   transition(vtos, vtos);
1726   __ movl(rdx, at_bcp(4)); // get constant
1727   locals_index_wide(rbx);
1728   __ bswapl(rdx); // swap bytes & sign-extend constant
1729   __ sarl(rdx, 16);
1730   __ addl(iaddress(rbx), rdx);
1731   // Note: should probably use only one movl to get both
1732   //       the index and the constant -> fix this
1733 }
1734 
1735 void TemplateTable::convert() {
1736 #ifdef _LP64
1737   // Checking
1738 #ifdef ASSERT
1739   {
1740     TosState tos_in  = ilgl;
1741     TosState tos_out = ilgl;
1742     switch (bytecode()) {
1743     case Bytecodes::_i2l: // fall through
1744     case Bytecodes::_i2f: // fall through
1745     case Bytecodes::_i2d: // fall through
1746     case Bytecodes::_i2b: // fall through
1747     case Bytecodes::_i2c: // fall through
1748     case Bytecodes::_i2s: tos_in = itos; break;
1749     case Bytecodes::_l2i: // fall through
1750     case Bytecodes::_l2f: // fall through
1751     case Bytecodes::_l2d: tos_in = ltos; break;
1752     case Bytecodes::_f2i: // fall through
1753     case Bytecodes::_f2l: // fall through
1754     case Bytecodes::_f2d: tos_in = ftos; break;
1755     case Bytecodes::_d2i: // fall through
1756     case Bytecodes::_d2l: // fall through
1757     case Bytecodes::_d2f: tos_in = dtos; break;
1758     default             : ShouldNotReachHere();
1759     }
1760     switch (bytecode()) {
1761     case Bytecodes::_l2i: // fall through
1762     case Bytecodes::_f2i: // fall through
1763     case Bytecodes::_d2i: // fall through
1764     case Bytecodes::_i2b: // fall through
1765     case Bytecodes::_i2c: // fall through
1766     case Bytecodes::_i2s: tos_out = itos; break;
1767     case Bytecodes::_i2l: // fall through
1768     case Bytecodes::_f2l: // fall through
1769     case Bytecodes::_d2l: tos_out = ltos; break;
1770     case Bytecodes::_i2f: // fall through
1771     case Bytecodes::_l2f: // fall through
1772     case Bytecodes::_d2f: tos_out = ftos; break;
1773     case Bytecodes::_i2d: // fall through
1774     case Bytecodes::_l2d: // fall through
1775     case Bytecodes::_f2d: tos_out = dtos; break;
1776     default             : ShouldNotReachHere();
1777     }
1778     transition(tos_in, tos_out);
1779   }
1780 #endif // ASSERT
1781 
1782   static const int64_t is_nan = 0x8000000000000000L;
1783 
1784   // Conversion
1785   switch (bytecode()) {
1786   case Bytecodes::_i2l:
1787     __ movslq(rax, rax);
1788     break;
1789   case Bytecodes::_i2f:
1790     __ cvtsi2ssl(xmm0, rax);
1791     break;
1792   case Bytecodes::_i2d:
1793     __ cvtsi2sdl(xmm0, rax);
1794     break;
1795   case Bytecodes::_i2b:
1796     __ movsbl(rax, rax);
1797     break;
1798   case Bytecodes::_i2c:
1799     __ movzwl(rax, rax);
1800     break;
1801   case Bytecodes::_i2s:
1802     __ movswl(rax, rax);
1803     break;
1804   case Bytecodes::_l2i:
1805     __ movl(rax, rax);
1806     break;
1807   case Bytecodes::_l2f:
1808     __ cvtsi2ssq(xmm0, rax);
1809     break;
1810   case Bytecodes::_l2d:
1811     __ cvtsi2sdq(xmm0, rax);
1812     break;
1813   case Bytecodes::_f2i:
1814   {
1815     Label L;
1816     __ cvttss2sil(rax, xmm0);
1817     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1818     __ jcc(Assembler::notEqual, L);
1819     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1820     __ bind(L);
1821   }
1822     break;
1823   case Bytecodes::_f2l:
1824   {
1825     Label L;
1826     __ cvttss2siq(rax, xmm0);
1827     // NaN or overflow/underflow?
1828     __ cmp64(rax, ExternalAddress((address) &is_nan), rscratch1);
1829     __ jcc(Assembler::notEqual, L);
1830     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1831     __ bind(L);
1832   }
1833     break;
1834   case Bytecodes::_f2d:
1835     __ cvtss2sd(xmm0, xmm0);
1836     break;
1837   case Bytecodes::_d2i:
1838   {
1839     Label L;
1840     __ cvttsd2sil(rax, xmm0);
1841     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1842     __ jcc(Assembler::notEqual, L);
1843     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1844     __ bind(L);
1845   }
1846     break;
1847   case Bytecodes::_d2l:
1848   {
1849     Label L;
1850     __ cvttsd2siq(rax, xmm0);
1851     // NaN or overflow/underflow?
1852     __ cmp64(rax, ExternalAddress((address) &is_nan), rscratch1);
1853     __ jcc(Assembler::notEqual, L);
1854     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1855     __ bind(L);
1856   }
1857     break;
1858   case Bytecodes::_d2f:
1859     __ cvtsd2ss(xmm0, xmm0);
1860     break;
1861   default:
1862     ShouldNotReachHere();
1863   }
1864 #else // !_LP64
1865   // Checking
1866 #ifdef ASSERT
1867   { TosState tos_in  = ilgl;
1868     TosState tos_out = ilgl;
1869     switch (bytecode()) {
1870       case Bytecodes::_i2l: // fall through
1871       case Bytecodes::_i2f: // fall through
1872       case Bytecodes::_i2d: // fall through
1873       case Bytecodes::_i2b: // fall through
1874       case Bytecodes::_i2c: // fall through
1875       case Bytecodes::_i2s: tos_in = itos; break;
1876       case Bytecodes::_l2i: // fall through
1877       case Bytecodes::_l2f: // fall through
1878       case Bytecodes::_l2d: tos_in = ltos; break;
1879       case Bytecodes::_f2i: // fall through
1880       case Bytecodes::_f2l: // fall through
1881       case Bytecodes::_f2d: tos_in = ftos; break;
1882       case Bytecodes::_d2i: // fall through
1883       case Bytecodes::_d2l: // fall through
1884       case Bytecodes::_d2f: tos_in = dtos; break;
1885       default             : ShouldNotReachHere();
1886     }
1887     switch (bytecode()) {
1888       case Bytecodes::_l2i: // fall through
1889       case Bytecodes::_f2i: // fall through
1890       case Bytecodes::_d2i: // fall through
1891       case Bytecodes::_i2b: // fall through
1892       case Bytecodes::_i2c: // fall through
1893       case Bytecodes::_i2s: tos_out = itos; break;
1894       case Bytecodes::_i2l: // fall through
1895       case Bytecodes::_f2l: // fall through
1896       case Bytecodes::_d2l: tos_out = ltos; break;
1897       case Bytecodes::_i2f: // fall through
1898       case Bytecodes::_l2f: // fall through
1899       case Bytecodes::_d2f: tos_out = ftos; break;
1900       case Bytecodes::_i2d: // fall through
1901       case Bytecodes::_l2d: // fall through
1902       case Bytecodes::_f2d: tos_out = dtos; break;
1903       default             : ShouldNotReachHere();
1904     }
1905     transition(tos_in, tos_out);
1906   }
1907 #endif // ASSERT
1908 
1909   // Conversion
1910   // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1911   switch (bytecode()) {
1912     case Bytecodes::_i2l:
1913       __ extend_sign(rdx, rax);
1914       break;
1915     case Bytecodes::_i2f:
1916       if (UseSSE >= 1) {
1917         __ cvtsi2ssl(xmm0, rax);
1918       } else {
1919         __ push(rax);          // store int on tos
1920         __ fild_s(at_rsp());   // load int to ST0
1921         __ f2ieee();           // truncate to float size
1922         __ pop(rcx);           // adjust rsp
1923       }
1924       break;
1925     case Bytecodes::_i2d:
1926       if (UseSSE >= 2) {
1927         __ cvtsi2sdl(xmm0, rax);
1928       } else {
1929       __ push(rax);          // add one slot for d2ieee()
1930       __ push(rax);          // store int on tos
1931       __ fild_s(at_rsp());   // load int to ST0
1932       __ d2ieee();           // truncate to double size
1933       __ pop(rcx);           // adjust rsp
1934       __ pop(rcx);
1935       }
1936       break;
1937     case Bytecodes::_i2b:
1938       __ shll(rax, 24);      // truncate upper 24 bits
1939       __ sarl(rax, 24);      // and sign-extend byte
1940       LP64_ONLY(__ movsbl(rax, rax));
1941       break;
1942     case Bytecodes::_i2c:
1943       __ andl(rax, 0xFFFF);  // truncate upper 16 bits
1944       LP64_ONLY(__ movzwl(rax, rax));
1945       break;
1946     case Bytecodes::_i2s:
1947       __ shll(rax, 16);      // truncate upper 16 bits
1948       __ sarl(rax, 16);      // and sign-extend short
1949       LP64_ONLY(__ movswl(rax, rax));
1950       break;
1951     case Bytecodes::_l2i:
1952       /* nothing to do */
1953       break;
1954     case Bytecodes::_l2f:
1955       // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
1956       // 64-bit long values to floats. On 32-bit platforms it is not possible
1957       // to use that instruction with 64-bit operands, therefore the FPU is
1958       // used to perform the conversion.
1959       __ push(rdx);          // store long on tos
1960       __ push(rax);
1961       __ fild_d(at_rsp());   // load long to ST0
1962       __ f2ieee();           // truncate to float size
1963       __ pop(rcx);           // adjust rsp
1964       __ pop(rcx);
1965       if (UseSSE >= 1) {
1966         __ push_f();
1967         __ pop_f(xmm0);
1968       }
1969       break;
1970     case Bytecodes::_l2d:
1971       // On 32-bit platforms the FPU is used for conversion because on
1972       // 32-bit platforms it is not not possible to use the cvtsi2sdq
1973       // instruction with 64-bit operands.
1974       __ push(rdx);          // store long on tos
1975       __ push(rax);
1976       __ fild_d(at_rsp());   // load long to ST0
1977       __ d2ieee();           // truncate to double size
1978       __ pop(rcx);           // adjust rsp
1979       __ pop(rcx);
1980       if (UseSSE >= 2) {
1981         __ push_d();
1982         __ pop_d(xmm0);
1983       }
1984       break;
1985     case Bytecodes::_f2i:
1986       // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
1987       // as it returns 0 for any NaN.
1988       if (UseSSE >= 1) {
1989         __ push_f(xmm0);
1990       } else {
1991         __ push(rcx);          // reserve space for argument
1992         __ fstp_s(at_rsp());   // pass float argument on stack
1993       }
1994       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1995       break;
1996     case Bytecodes::_f2l:
1997       // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
1998       // as it returns 0 for any NaN.
1999       if (UseSSE >= 1) {
2000        __ push_f(xmm0);
2001       } else {
2002         __ push(rcx);          // reserve space for argument
2003         __ fstp_s(at_rsp());   // pass float argument on stack
2004       }
2005       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2006       break;
2007     case Bytecodes::_f2d:
2008       if (UseSSE < 1) {
2009         /* nothing to do */
2010       } else if (UseSSE == 1) {
2011         __ push_f(xmm0);
2012         __ pop_f();
2013       } else { // UseSSE >= 2
2014         __ cvtss2sd(xmm0, xmm0);
2015       }
2016       break;
2017     case Bytecodes::_d2i:
2018       if (UseSSE >= 2) {
2019         __ push_d(xmm0);
2020       } else {
2021         __ push(rcx);          // reserve space for argument
2022         __ push(rcx);
2023         __ fstp_d(at_rsp());   // pass double argument on stack
2024       }
2025       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
2026       break;
2027     case Bytecodes::_d2l:
2028       if (UseSSE >= 2) {
2029         __ push_d(xmm0);
2030       } else {
2031         __ push(rcx);          // reserve space for argument
2032         __ push(rcx);
2033         __ fstp_d(at_rsp());   // pass double argument on stack
2034       }
2035       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
2036       break;
2037     case Bytecodes::_d2f:
2038       if (UseSSE <= 1) {
2039         __ push(rcx);          // reserve space for f2ieee()
2040         __ f2ieee();           // truncate to float size
2041         __ pop(rcx);           // adjust rsp
2042         if (UseSSE == 1) {
2043           // The cvtsd2ss instruction is not available if UseSSE==1, therefore
2044           // the conversion is performed using the FPU in this case.
2045           __ push_f();
2046           __ pop_f(xmm0);
2047         }
2048       } else { // UseSSE >= 2
2049         __ cvtsd2ss(xmm0, xmm0);
2050       }
2051       break;
2052     default             :
2053       ShouldNotReachHere();
2054   }
2055 #endif // _LP64
2056 }
2057 
2058 void TemplateTable::lcmp() {
2059   transition(ltos, itos);
2060 #ifdef _LP64
2061   Label done;
2062   __ pop_l(rdx);
2063   __ cmpq(rdx, rax);
2064   __ movl(rax, -1);
2065   __ jccb(Assembler::less, done);
2066   __ setb(Assembler::notEqual, rax);
2067   __ movzbl(rax, rax);
2068   __ bind(done);
2069 #else
2070 
2071   // y = rdx:rax
2072   __ pop_l(rbx, rcx);             // get x = rcx:rbx
2073   __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
2074   __ mov(rax, rcx);
2075 #endif
2076 }
2077 
2078 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2079   if ((is_float && UseSSE >= 1) ||
2080       (!is_float && UseSSE >= 2)) {
2081     Label done;
2082     if (is_float) {
2083       // XXX get rid of pop here, use ... reg, mem32
2084       __ pop_f(xmm1);
2085       __ ucomiss(xmm1, xmm0);
2086     } else {
2087       // XXX get rid of pop here, use ... reg, mem64
2088       __ pop_d(xmm1);
2089       __ ucomisd(xmm1, xmm0);
2090     }
2091     if (unordered_result < 0) {
2092       __ movl(rax, -1);
2093       __ jccb(Assembler::parity, done);
2094       __ jccb(Assembler::below, done);
2095       __ setb(Assembler::notEqual, rdx);
2096       __ movzbl(rax, rdx);
2097     } else {
2098       __ movl(rax, 1);
2099       __ jccb(Assembler::parity, done);
2100       __ jccb(Assembler::above, done);
2101       __ movl(rax, 0);
2102       __ jccb(Assembler::equal, done);
2103       __ decrementl(rax);
2104     }
2105     __ bind(done);
2106   } else {
2107 #ifdef _LP64
2108     ShouldNotReachHere();
2109 #else // !_LP64
2110     if (is_float) {
2111       __ fld_s(at_rsp());
2112     } else {
2113       __ fld_d(at_rsp());
2114       __ pop(rdx);
2115     }
2116     __ pop(rcx);
2117     __ fcmp2int(rax, unordered_result < 0);
2118 #endif // _LP64
2119   }
2120 }
2121 
2122 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2123   __ get_method(rcx); // rcx holds method
2124   __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2125                                      // holds bumped taken count
2126 
2127   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2128                              InvocationCounter::counter_offset();
2129   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2130                               InvocationCounter::counter_offset();
2131 
2132   // Load up edx with the branch displacement
2133   if (is_wide) {
2134     __ movl(rdx, at_bcp(1));
2135   } else {
2136     __ load_signed_short(rdx, at_bcp(1));
2137   }
2138   __ bswapl(rdx);
2139 
2140   if (!is_wide) {
2141     __ sarl(rdx, 16);
2142   }
2143   LP64_ONLY(__ movl2ptr(rdx, rdx));
2144 
2145   // Handle all the JSR stuff here, then exit.
2146   // It's much shorter and cleaner than intermingling with the non-JSR
2147   // normal-branch stuff occurring below.
2148   if (is_jsr) {
2149     // Pre-load the next target bytecode into rbx
2150     __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2151 
2152     // compute return address as bci in rax
2153     __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2154                         in_bytes(ConstMethod::codes_offset())));
2155     __ subptr(rax, Address(rcx, Method::const_offset()));
2156     // Adjust the bcp in r13 by the displacement in rdx
2157     __ addptr(rbcp, rdx);
2158     // jsr returns atos that is not an oop
2159     __ push_i(rax);
2160     __ dispatch_only(vtos, true);
2161     return;
2162   }
2163 
2164   // Normal (non-jsr) branch handling
2165 
2166   // Adjust the bcp in r13 by the displacement in rdx
2167   __ addptr(rbcp, rdx);
2168 
2169   assert(UseLoopCounter || !UseOnStackReplacement,
2170          "on-stack-replacement requires loop counters");
2171   Label backedge_counter_overflow;
2172   Label dispatch;
2173   if (UseLoopCounter) {
2174     // increment backedge counter for backward branches
2175     // rax: MDO
2176     // rbx: MDO bumped taken-count
2177     // rcx: method
2178     // rdx: target offset
2179     // r13: target bcp
2180     // r14: locals pointer
2181     __ testl(rdx, rdx);             // check if forward or backward branch
2182     __ jcc(Assembler::positive, dispatch); // count only if backward branch
2183 
2184     // check if MethodCounters exists
2185     Label has_counters;
2186     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2187     __ testptr(rax, rax);
2188     __ jcc(Assembler::notZero, has_counters);
2189     __ push(rdx);
2190     __ push(rcx);
2191     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2192                rcx);
2193     __ pop(rcx);
2194     __ pop(rdx);
2195     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2196     __ testptr(rax, rax);
2197     __ jcc(Assembler::zero, dispatch);
2198     __ bind(has_counters);
2199 
2200     Label no_mdo;
2201     if (ProfileInterpreter) {
2202       // Are we profiling?
2203       __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2204       __ testptr(rbx, rbx);
2205       __ jccb(Assembler::zero, no_mdo);
2206       // Increment the MDO backedge counter
2207       const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2208           in_bytes(InvocationCounter::counter_offset()));
2209       const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2210       __ increment_mask_and_jump(mdo_backedge_counter, mask, rax,
2211           UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2212       __ jmp(dispatch);
2213     }
2214     __ bind(no_mdo);
2215     // Increment backedge counter in MethodCounters*
2216     __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2217     const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2218     __ increment_mask_and_jump(Address(rcx, be_offset), mask, rax,
2219         UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2220     __ bind(dispatch);
2221   }
2222 
2223   // Pre-load the next target bytecode into rbx
2224   __ load_unsigned_byte(rbx, Address(rbcp, 0));
2225 
2226   // continue with the bytecode @ target
2227   // rax: return bci for jsr's, unused otherwise
2228   // rbx: target bytecode
2229   // r13: target bcp
2230   __ dispatch_only(vtos, true);
2231 
2232   if (UseLoopCounter) {
2233     if (UseOnStackReplacement) {
2234       Label set_mdp;
2235       // invocation counter overflow
2236       __ bind(backedge_counter_overflow);
2237       __ negptr(rdx);
2238       __ addptr(rdx, rbcp); // branch bcp
2239       // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2240       __ call_VM(noreg,
2241                  CAST_FROM_FN_PTR(address,
2242                                   InterpreterRuntime::frequency_counter_overflow),
2243                  rdx);
2244 
2245       // rax: osr nmethod (osr ok) or NULL (osr not possible)
2246       // rdx: scratch
2247       // r14: locals pointer
2248       // r13: bcp
2249       __ testptr(rax, rax);                        // test result
2250       __ jcc(Assembler::zero, dispatch);         // no osr if null
2251       // nmethod may have been invalidated (VM may block upon call_VM return)
2252       __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2253       __ jcc(Assembler::notEqual, dispatch);
2254 
2255       // We have the address of an on stack replacement routine in rax.
2256       // In preparation of invoking it, first we must migrate the locals
2257       // and monitors from off the interpreter frame on the stack.
2258       // Ensure to save the osr nmethod over the migration call,
2259       // it will be preserved in rbx.
2260       __ mov(rbx, rax);
2261 
2262       NOT_LP64(__ get_thread(rcx));
2263 
2264       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2265 
2266       // rax is OSR buffer, move it to expected parameter location
2267       LP64_ONLY(__ mov(j_rarg0, rax));
2268       NOT_LP64(__ mov(rcx, rax));
2269       // We use j_rarg definitions here so that registers don't conflict as parameter
2270       // registers change across platforms as we are in the midst of a calling
2271       // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.
2272 
2273       const Register retaddr   = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2274       const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2275 
2276       // pop the interpreter frame
2277       __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2278       __ leave();                                // remove frame anchor
2279       __ pop(retaddr);                           // get return address
2280       __ mov(rsp, sender_sp);                   // set sp to sender sp
2281       // Ensure compiled code always sees stack at proper alignment
2282       __ andptr(rsp, -(StackAlignmentInBytes));
2283 
2284       // unlike x86 we need no specialized return from compiled code
2285       // to the interpreter or the call stub.
2286 
2287       // push the return address
2288       __ push(retaddr);
2289 
2290       // and begin the OSR nmethod
2291       __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
2292     }
2293   }
2294 }
2295 
2296 void TemplateTable::if_0cmp(Condition cc) {
2297   transition(itos, vtos);
2298   // assume branch is more often taken than not (loops use backward branches)
2299   Label not_taken;
2300   __ testl(rax, rax);
2301   __ jcc(j_not(cc), not_taken);
2302   branch(false, false);
2303   __ bind(not_taken);
2304   __ profile_not_taken_branch(rax);
2305 }
2306 
2307 void TemplateTable::if_icmp(Condition cc) {
2308   transition(itos, vtos);
2309   // assume branch is more often taken than not (loops use backward branches)
2310   Label not_taken;
2311   __ pop_i(rdx);
2312   __ cmpl(rdx, rax);
2313   __ jcc(j_not(cc), not_taken);
2314   branch(false, false);
2315   __ bind(not_taken);
2316   __ profile_not_taken_branch(rax);
2317 }
2318 
2319 void TemplateTable::if_nullcmp(Condition cc) {
2320   transition(atos, vtos);
2321   // assume branch is more often taken than not (loops use backward branches)
2322   Label not_taken;
2323   __ testptr(rax, rax);
2324   __ jcc(j_not(cc), not_taken);
2325   branch(false, false);
2326   __ bind(not_taken);
2327   __ profile_not_taken_branch(rax);
2328 }
2329 
2330 void TemplateTable::if_acmp(Condition cc) {
2331   transition(atos, vtos);
2332   // assume branch is more often taken than not (loops use backward branches)
2333   Label not_taken;
2334   __ pop_ptr(rdx);
2335   __ cmpoop(rdx, rax);
2336   __ jcc(j_not(cc), not_taken);
2337   branch(false, false);
2338   __ bind(not_taken);
2339   __ profile_not_taken_branch(rax);
2340 }
2341 
2342 void TemplateTable::ret() {
2343   transition(vtos, vtos);
2344   locals_index(rbx);
2345   LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2346   NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2347   __ profile_ret(rbx, rcx);
2348   __ get_method(rax);
2349   __ movptr(rbcp, Address(rax, Method::const_offset()));
2350   __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2351                       ConstMethod::codes_offset()));
2352   __ dispatch_next(vtos, 0, true);
2353 }
2354 
2355 void TemplateTable::wide_ret() {
2356   transition(vtos, vtos);
2357   locals_index_wide(rbx);
2358   __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2359   __ profile_ret(rbx, rcx);
2360   __ get_method(rax);
2361   __ movptr(rbcp, Address(rax, Method::const_offset()));
2362   __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2363   __ dispatch_next(vtos, 0, true);
2364 }
2365 
2366 void TemplateTable::tableswitch() {
2367   Label default_case, continue_execution;
2368   transition(itos, vtos);
2369 
2370   // align r13/rsi
2371   __ lea(rbx, at_bcp(BytesPerInt));
2372   __ andptr(rbx, -BytesPerInt);
2373   // load lo & hi
2374   __ movl(rcx, Address(rbx, BytesPerInt));
2375   __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2376   __ bswapl(rcx);
2377   __ bswapl(rdx);
2378   // check against lo & hi
2379   __ cmpl(rax, rcx);
2380   __ jcc(Assembler::less, default_case);
2381   __ cmpl(rax, rdx);
2382   __ jcc(Assembler::greater, default_case);
2383   // lookup dispatch offset
2384   __ subl(rax, rcx);
2385   __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2386   __ profile_switch_case(rax, rbx, rcx);
2387   // continue execution
2388   __ bind(continue_execution);
2389   __ bswapl(rdx);
2390   LP64_ONLY(__ movl2ptr(rdx, rdx));
2391   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2392   __ addptr(rbcp, rdx);
2393   __ dispatch_only(vtos, true);
2394   // handle default
2395   __ bind(default_case);
2396   __ profile_switch_default(rax);
2397   __ movl(rdx, Address(rbx, 0));
2398   __ jmp(continue_execution);
2399 }
2400 
2401 void TemplateTable::lookupswitch() {
2402   transition(itos, itos);
2403   __ stop("lookupswitch bytecode should have been rewritten");
2404 }
2405 
2406 void TemplateTable::fast_linearswitch() {
2407   transition(itos, vtos);
2408   Label loop_entry, loop, found, continue_execution;
2409   // bswap rax so we can avoid bswapping the table entries
2410   __ bswapl(rax);
2411   // align r13
2412   __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2413                                     // this instruction (change offsets
2414                                     // below)
2415   __ andptr(rbx, -BytesPerInt);
2416   // set counter
2417   __ movl(rcx, Address(rbx, BytesPerInt));
2418   __ bswapl(rcx);
2419   __ jmpb(loop_entry);
2420   // table search
2421   __ bind(loop);
2422   __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2423   __ jcc(Assembler::equal, found);
2424   __ bind(loop_entry);
2425   __ decrementl(rcx);
2426   __ jcc(Assembler::greaterEqual, loop);
2427   // default case
2428   __ profile_switch_default(rax);
2429   __ movl(rdx, Address(rbx, 0));
2430   __ jmp(continue_execution);
2431   // entry found -> get offset
2432   __ bind(found);
2433   __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2434   __ profile_switch_case(rcx, rax, rbx);
2435   // continue execution
2436   __ bind(continue_execution);
2437   __ bswapl(rdx);
2438   __ movl2ptr(rdx, rdx);
2439   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2440   __ addptr(rbcp, rdx);
2441   __ dispatch_only(vtos, true);
2442 }
2443 
2444 void TemplateTable::fast_binaryswitch() {
2445   transition(itos, vtos);
2446   // Implementation using the following core algorithm:
2447   //
2448   // int binary_search(int key, LookupswitchPair* array, int n) {
2449   //   // Binary search according to "Methodik des Programmierens" by
2450   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2451   //   int i = 0;
2452   //   int j = n;
2453   //   while (i+1 < j) {
2454   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2455   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2456   //     // where a stands for the array and assuming that the (inexisting)
2457   //     // element a[n] is infinitely big.
2458   //     int h = (i + j) >> 1;
2459   //     // i < h < j
2460   //     if (key < array[h].fast_match()) {
2461   //       j = h;
2462   //     } else {
2463   //       i = h;
2464   //     }
2465   //   }
2466   //   // R: a[i] <= key < a[i+1] or Q
2467   //   // (i.e., if key is within array, i is the correct index)
2468   //   return i;
2469   // }
2470 
2471   // Register allocation
2472   const Register key   = rax; // already set (tosca)
2473   const Register array = rbx;
2474   const Register i     = rcx;
2475   const Register j     = rdx;
2476   const Register h     = rdi;
2477   const Register temp  = rsi;
2478 
2479   // Find array start
2480   NOT_LP64(__ save_bcp());
2481 
2482   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2483                                           // get rid of this
2484                                           // instruction (change
2485                                           // offsets below)
2486   __ andptr(array, -BytesPerInt);
2487 
2488   // Initialize i & j
2489   __ xorl(i, i);                            // i = 0;
2490   __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2491 
2492   // Convert j into native byteordering
2493   __ bswapl(j);
2494 
2495   // And start
2496   Label entry;
2497   __ jmp(entry);
2498 
2499   // binary search loop
2500   {
2501     Label loop;
2502     __ bind(loop);
2503     // int h = (i + j) >> 1;
2504     __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2505     __ sarl(h, 1);                               // h = (i + j) >> 1;
2506     // if (key < array[h].fast_match()) {
2507     //   j = h;
2508     // } else {
2509     //   i = h;
2510     // }
2511     // Convert array[h].match to native byte-ordering before compare
2512     __ movl(temp, Address(array, h, Address::times_8));
2513     __ bswapl(temp);
2514     __ cmpl(key, temp);
2515     // j = h if (key <  array[h].fast_match())
2516     __ cmov32(Assembler::less, j, h);
2517     // i = h if (key >= array[h].fast_match())
2518     __ cmov32(Assembler::greaterEqual, i, h);
2519     // while (i+1 < j)
2520     __ bind(entry);
2521     __ leal(h, Address(i, 1)); // i+1
2522     __ cmpl(h, j);             // i+1 < j
2523     __ jcc(Assembler::less, loop);
2524   }
2525 
2526   // end of binary search, result index is i (must check again!)
2527   Label default_case;
2528   // Convert array[i].match to native byte-ordering before compare
2529   __ movl(temp, Address(array, i, Address::times_8));
2530   __ bswapl(temp);
2531   __ cmpl(key, temp);
2532   __ jcc(Assembler::notEqual, default_case);
2533 
2534   // entry found -> j = offset
2535   __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2536   __ profile_switch_case(i, key, array);
2537   __ bswapl(j);
2538   LP64_ONLY(__ movslq(j, j));
2539 
2540   NOT_LP64(__ restore_bcp());
2541   NOT_LP64(__ restore_locals());                           // restore rdi
2542 
2543   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2544   __ addptr(rbcp, j);
2545   __ dispatch_only(vtos, true);
2546 
2547   // default case -> j = default offset
2548   __ bind(default_case);
2549   __ profile_switch_default(i);
2550   __ movl(j, Address(array, -2 * BytesPerInt));
2551   __ bswapl(j);
2552   LP64_ONLY(__ movslq(j, j));
2553 
2554   NOT_LP64(__ restore_bcp());
2555   NOT_LP64(__ restore_locals());
2556 
2557   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2558   __ addptr(rbcp, j);
2559   __ dispatch_only(vtos, true);
2560 }
2561 
2562 void TemplateTable::_return(TosState state) {
2563   transition(state, state);
2564 
2565   assert(_desc->calls_vm(),
2566          "inconsistent calls_vm information"); // call in remove_activation
2567 
2568   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2569     assert(state == vtos, "only valid state");
2570     Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2571     __ movptr(robj, aaddress(0));
2572     __ load_klass(rdi, robj, rscratch1);
2573     __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2574     __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2575     Label skip_register_finalizer;
2576     __ jcc(Assembler::zero, skip_register_finalizer);
2577 
2578     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2579 
2580     __ bind(skip_register_finalizer);
2581   }
2582 
2583   if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
2584     Label no_safepoint;
2585     NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
2586 #ifdef _LP64
2587     __ testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2588 #else
2589     const Register thread = rdi;
2590     __ get_thread(thread);
2591     __ testb(Address(thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2592 #endif
2593     __ jcc(Assembler::zero, no_safepoint);
2594     __ push(state);
2595     __ push_cont_fastpath();
2596     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2597                                        InterpreterRuntime::at_safepoint));
2598     __ pop_cont_fastpath();
2599     __ pop(state);
2600     __ bind(no_safepoint);
2601   }
2602 
2603   // Narrow result if state is itos but result type is smaller.
2604   // Need to narrow in the return bytecode rather than in generate_return_entry
2605   // since compiled code callers expect the result to already be narrowed.
2606   if (state == itos) {
2607     __ narrow(rax);
2608   }
2609   __ remove_activation(state, rbcp);
2610 
2611   __ jmp(rbcp);
2612 }
2613 
2614 // ----------------------------------------------------------------------------
2615 // Volatile variables demand their effects be made known to all CPU's
2616 // in order.  Store buffers on most chips allow reads & writes to
2617 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2618 // without some kind of memory barrier (i.e., it's not sufficient that
2619 // the interpreter does not reorder volatile references, the hardware
2620 // also must not reorder them).
2621 //
2622 // According to the new Java Memory Model (JMM):
2623 // (1) All volatiles are serialized wrt to each other.  ALSO reads &
2624 //     writes act as acquire & release, so:
2625 // (2) A read cannot let unrelated NON-volatile memory refs that
2626 //     happen after the read float up to before the read.  It's OK for
2627 //     non-volatile memory refs that happen before the volatile read to
2628 //     float down below it.
2629 // (3) Similar a volatile write cannot let unrelated NON-volatile
2630 //     memory refs that happen BEFORE the write float down to after the
2631 //     write.  It's OK for non-volatile memory refs that happen after the
2632 //     volatile write to float up before it.
2633 //
2634 // We only put in barriers around volatile refs (they are expensive),
2635 // not _between_ memory refs (that would require us to track the
2636 // flavor of the previous memory refs).  Requirements (2) and (3)
2637 // require some barriers before volatile stores and after volatile
2638 // loads.  These nearly cover requirement (1) but miss the
2639 // volatile-store-volatile-load case.  This final case is placed after
2640 // volatile-stores although it could just as well go before
2641 // volatile-loads.
2642 
2643 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2644   // Helper function to insert a is-volatile test and memory barrier
2645   __ membar(order_constraint);
2646 }
2647 
2648 void TemplateTable::resolve_cache_and_index(int byte_no,
2649                                             Register cache,
2650                                             Register index,
2651                                             size_t index_size) {
2652   const Register temp = rbx;
2653   assert_different_registers(cache, index, temp);
2654 
2655   Label L_clinit_barrier_slow;
2656   Label resolved;
2657 
2658   Bytecodes::Code code = bytecode();
2659   switch (code) {
2660   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2661   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2662   default: break;
2663   }
2664 
2665   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2666   __ get_cache_and_index_and_bytecode_at_bcp(cache, index, temp, byte_no, 1, index_size);
2667   __ cmpl(temp, code);  // have we resolved this bytecode?
2668   __ jcc(Assembler::equal, resolved);
2669 
2670   // resolve first time through
2671   // Class initialization barrier slow path lands here as well.
2672   __ bind(L_clinit_barrier_slow);
2673   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2674   __ movl(temp, code);
2675   __ call_VM(noreg, entry, temp);
2676   // Update registers with resolved info
2677   __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
2678 
2679   __ bind(resolved);
2680 
2681   // Class initialization barrier for static methods
2682   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2683     const Register method = temp;
2684     const Register klass  = temp;
2685     const Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2686     assert(thread != noreg, "x86_32 not supported");
2687 
2688     __ load_resolved_method_at_index(byte_no, method, cache, index);
2689     __ load_method_holder(klass, method);
2690     __ clinit_barrier(klass, thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow);
2691   }
2692 }
2693 
2694 // The cache and index registers must be set before call
2695 void TemplateTable::load_field_cp_cache_entry(Register obj,
2696                                               Register cache,
2697                                               Register index,
2698                                               Register off,
2699                                               Register flags,
2700                                               bool is_static = false) {
2701   assert_different_registers(cache, index, flags, off);
2702 
2703   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2704   // Field offset
2705   __ movptr(off, Address(cache, index, Address::times_ptr,
2706                          in_bytes(cp_base_offset +
2707                                   ConstantPoolCacheEntry::f2_offset())));
2708   // Flags
2709   __ movl(flags, Address(cache, index, Address::times_ptr,
2710                          in_bytes(cp_base_offset +
2711                                   ConstantPoolCacheEntry::flags_offset())));
2712 
2713   // klass overwrite register
2714   if (is_static) {
2715     __ movptr(obj, Address(cache, index, Address::times_ptr,
2716                            in_bytes(cp_base_offset +
2717                                     ConstantPoolCacheEntry::f1_offset())));
2718     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2719     __ movptr(obj, Address(obj, mirror_offset));
2720     __ resolve_oop_handle(obj, rscratch2);
2721   }
2722 }
2723 
2724 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2725                                                Register method,
2726                                                Register itable_index,
2727                                                Register flags,
2728                                                bool is_invokevirtual,
2729                                                bool is_invokevfinal, /*unused*/
2730                                                bool is_invokedynamic) {
2731   // setup registers
2732   const Register cache = rcx;
2733   const Register index = rdx;
2734   assert_different_registers(method, flags);
2735   assert_different_registers(method, cache, index);
2736   assert_different_registers(itable_index, flags);
2737   assert_different_registers(itable_index, cache, index);
2738   // determine constant pool cache field offsets
2739   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2740   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2741                                     ConstantPoolCacheEntry::flags_offset());
2742   // access constant pool cache fields
2743   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2744                                     ConstantPoolCacheEntry::f2_offset());
2745 
2746   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2747   resolve_cache_and_index(byte_no, cache, index, index_size);
2748   __ load_resolved_method_at_index(byte_no, method, cache, index);
2749 
2750   if (itable_index != noreg) {
2751     // pick up itable or appendix index from f2 also:
2752     __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2753   }
2754   __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2755 }
2756 
2757 // The registers cache and index expected to be set before call.
2758 // Correct values of the cache and index registers are preserved.
2759 void TemplateTable::jvmti_post_field_access(Register cache,
2760                                             Register index,
2761                                             bool is_static,
2762                                             bool has_tos) {
2763   if (JvmtiExport::can_post_field_access()) {
2764     // Check to see if a field access watch has been set before we take
2765     // the time to call into the VM.
2766     Label L1;
2767     assert_different_registers(cache, index, rax);
2768     __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2769     __ testl(rax,rax);
2770     __ jcc(Assembler::zero, L1);
2771 
2772     // cache entry pointer
2773     __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2774     __ shll(index, LogBytesPerWord);
2775     __ addptr(cache, index);
2776     if (is_static) {
2777       __ xorptr(rax, rax);      // NULL object reference
2778     } else {
2779       __ pop(atos);         // Get the object
2780       __ verify_oop(rax);
2781       __ push(atos);        // Restore stack state
2782     }
2783     // rax,:   object pointer or NULL
2784     // cache: cache entry pointer
2785     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2786                rax, cache);
2787     __ get_cache_and_index_at_bcp(cache, index, 1);
2788     __ bind(L1);
2789   }
2790 }
2791 
2792 void TemplateTable::pop_and_check_object(Register r) {
2793   __ pop_ptr(r);
2794   __ null_check(r);  // for field access must check obj.
2795   __ verify_oop(r);
2796 }
2797 
2798 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2799   transition(vtos, vtos);
2800 
2801   const Register cache = rcx;
2802   const Register index = rdx;
2803   const Register obj   = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2804   const Register off   = rbx;
2805   const Register flags = rax;
2806   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
2807 
2808   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2809   jvmti_post_field_access(cache, index, is_static, false);
2810   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2811 
2812   if (!is_static) pop_and_check_object(obj);
2813 
2814   const Address field(obj, off, Address::times_1, 0*wordSize);
2815 
2816   Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
2817 
2818   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2819   // Make sure we don't need to mask edx after the above shift
2820   assert(btos == 0, "change code, btos != 0");
2821 
2822   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2823 
2824   __ jcc(Assembler::notZero, notByte);
2825   // btos
2826   __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
2827   __ push(btos);
2828   // Rewrite bytecode to be faster
2829   if (!is_static && rc == may_rewrite) {
2830     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2831   }
2832   __ jmp(Done);
2833 
2834   __ bind(notByte);
2835   __ cmpl(flags, ztos);
2836   __ jcc(Assembler::notEqual, notBool);
2837 
2838   // ztos (same code as btos)
2839   __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
2840   __ push(ztos);
2841   // Rewrite bytecode to be faster
2842   if (!is_static && rc == may_rewrite) {
2843     // use btos rewriting, no truncating to t/f bit is needed for getfield.
2844     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2845   }
2846   __ jmp(Done);
2847 
2848   __ bind(notBool);
2849   __ cmpl(flags, atos);
2850   __ jcc(Assembler::notEqual, notObj);
2851   // atos
2852   do_oop_load(_masm, field, rax);
2853   __ push(atos);
2854   if (!is_static && rc == may_rewrite) {
2855     patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2856   }
2857   __ jmp(Done);
2858 
2859   __ bind(notObj);
2860   __ cmpl(flags, itos);
2861   __ jcc(Assembler::notEqual, notInt);
2862   // itos
2863   __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
2864   __ push(itos);
2865   // Rewrite bytecode to be faster
2866   if (!is_static && rc == may_rewrite) {
2867     patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2868   }
2869   __ jmp(Done);
2870 
2871   __ bind(notInt);
2872   __ cmpl(flags, ctos);
2873   __ jcc(Assembler::notEqual, notChar);
2874   // ctos
2875   __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
2876   __ push(ctos);
2877   // Rewrite bytecode to be faster
2878   if (!is_static && rc == may_rewrite) {
2879     patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2880   }
2881   __ jmp(Done);
2882 
2883   __ bind(notChar);
2884   __ cmpl(flags, stos);
2885   __ jcc(Assembler::notEqual, notShort);
2886   // stos
2887   __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
2888   __ push(stos);
2889   // Rewrite bytecode to be faster
2890   if (!is_static && rc == may_rewrite) {
2891     patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2892   }
2893   __ jmp(Done);
2894 
2895   __ bind(notShort);
2896   __ cmpl(flags, ltos);
2897   __ jcc(Assembler::notEqual, notLong);
2898   // ltos
2899     // Generate code as if volatile (x86_32).  There just aren't enough registers to
2900     // save that information and this code is faster than the test.
2901   __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
2902   __ push(ltos);
2903   // Rewrite bytecode to be faster
2904   LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
2905   __ jmp(Done);
2906 
2907   __ bind(notLong);
2908   __ cmpl(flags, ftos);
2909   __ jcc(Assembler::notEqual, notFloat);
2910   // ftos
2911 
2912   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2913   __ push(ftos);
2914   // Rewrite bytecode to be faster
2915   if (!is_static && rc == may_rewrite) {
2916     patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2917   }
2918   __ jmp(Done);
2919 
2920   __ bind(notFloat);
2921 #ifdef ASSERT
2922   Label notDouble;
2923   __ cmpl(flags, dtos);
2924   __ jcc(Assembler::notEqual, notDouble);
2925 #endif
2926   // dtos
2927   // MO_RELAXED: for the case of volatile field, in fact it adds no extra work for the underlying implementation
2928   __ access_load_at(T_DOUBLE, IN_HEAP | MO_RELAXED, noreg /* dtos */, field, noreg, noreg);
2929   __ push(dtos);
2930   // Rewrite bytecode to be faster
2931   if (!is_static && rc == may_rewrite) {
2932     patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2933   }
2934 #ifdef ASSERT
2935   __ jmp(Done);
2936 
2937   __ bind(notDouble);
2938   __ stop("Bad state");
2939 #endif
2940 
2941   __ bind(Done);
2942   // [jk] not needed currently
2943   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2944   //                                              Assembler::LoadStore));
2945 }
2946 
2947 void TemplateTable::getfield(int byte_no) {
2948   getfield_or_static(byte_no, false);
2949 }
2950 
2951 void TemplateTable::nofast_getfield(int byte_no) {
2952   getfield_or_static(byte_no, false, may_not_rewrite);
2953 }
2954 
2955 void TemplateTable::getstatic(int byte_no) {
2956   getfield_or_static(byte_no, true);
2957 }
2958 
2959 
2960 // The registers cache and index expected to be set before call.
2961 // The function may destroy various registers, just not the cache and index registers.
2962 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2963 
2964   const Register robj = LP64_ONLY(c_rarg2)   NOT_LP64(rax);
2965   const Register RBX  = LP64_ONLY(c_rarg1)   NOT_LP64(rbx);
2966   const Register RCX  = LP64_ONLY(c_rarg3)   NOT_LP64(rcx);
2967   const Register RDX  = LP64_ONLY(rscratch1) NOT_LP64(rdx);
2968 
2969   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2970 
2971   if (JvmtiExport::can_post_field_modification()) {
2972     // Check to see if a field modification watch has been set before
2973     // we take the time to call into the VM.
2974     Label L1;
2975     assert_different_registers(cache, index, rax);
2976     __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2977     __ testl(rax, rax);
2978     __ jcc(Assembler::zero, L1);
2979 
2980     __ get_cache_and_index_at_bcp(robj, RDX, 1);
2981 
2982 
2983     if (is_static) {
2984       // Life is simple.  Null out the object pointer.
2985       __ xorl(RBX, RBX);
2986 
2987     } else {
2988       // Life is harder. The stack holds the value on top, followed by
2989       // the object.  We don't know the size of the value, though; it
2990       // could be one or two words depending on its type. As a result,
2991       // we must find the type to determine where the object is.
2992 #ifndef _LP64
2993       Label two_word, valsize_known;
2994 #endif
2995       __ movl(RCX, Address(robj, RDX,
2996                            Address::times_ptr,
2997                            in_bytes(cp_base_offset +
2998                                      ConstantPoolCacheEntry::flags_offset())));
2999       NOT_LP64(__ mov(rbx, rsp));
3000       __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);
3001 
3002       // Make sure we don't need to mask rcx after the above shift
3003       ConstantPoolCacheEntry::verify_tos_state_shift();
3004 #ifdef _LP64
3005       __ movptr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
3006       __ cmpl(c_rarg3, ltos);
3007       __ cmovptr(Assembler::equal,
3008                  c_rarg1, at_tos_p2()); // ltos (two word jvalue)
3009       __ cmpl(c_rarg3, dtos);
3010       __ cmovptr(Assembler::equal,
3011                  c_rarg1, at_tos_p2()); // dtos (two word jvalue)
3012 #else
3013       __ cmpl(rcx, ltos);
3014       __ jccb(Assembler::equal, two_word);
3015       __ cmpl(rcx, dtos);
3016       __ jccb(Assembler::equal, two_word);
3017       __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
3018       __ jmpb(valsize_known);
3019 
3020       __ bind(two_word);
3021       __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
3022 
3023       __ bind(valsize_known);
3024       // setup object pointer
3025       __ movptr(rbx, Address(rbx, 0));
3026 #endif
3027     }
3028     // cache entry pointer
3029     __ addptr(robj, in_bytes(cp_base_offset));
3030     __ shll(RDX, LogBytesPerWord);
3031     __ addptr(robj, RDX);
3032     // object (tos)
3033     __ mov(RCX, rsp);
3034     // c_rarg1: object pointer set up above (NULL if static)
3035     // c_rarg2: cache entry pointer
3036     // c_rarg3: jvalue object on the stack
3037     __ call_VM(noreg,
3038                CAST_FROM_FN_PTR(address,
3039                                 InterpreterRuntime::post_field_modification),
3040                RBX, robj, RCX);
3041     __ get_cache_and_index_at_bcp(cache, index, 1);
3042     __ bind(L1);
3043   }
3044 }
3045 
3046 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3047   transition(vtos, vtos);
3048 
3049   const Register cache = rcx;
3050   const Register index = rdx;
3051   const Register obj   = rcx;
3052   const Register off   = rbx;
3053   const Register flags = rax;
3054 
3055   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
3056   jvmti_post_field_mod(cache, index, is_static);
3057   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
3058 
3059   // [jk] not needed currently
3060   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3061   //                                              Assembler::StoreStore));
3062 
3063   Label notVolatile, Done;
3064   __ movl(rdx, flags);
3065   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3066   __ andl(rdx, 0x1);
3067 
3068   // Check for volatile store
3069   __ testl(rdx, rdx);
3070   __ jcc(Assembler::zero, notVolatile);
3071 
3072   putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
3073   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3074                                                Assembler::StoreStore));
3075   __ jmp(Done);
3076   __ bind(notVolatile);
3077 
3078   putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
3079 
3080   __ bind(Done);
3081 }
3082 
3083 void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc,
3084                                               Register obj, Register off, Register flags) {
3085 
3086   // field addresses
3087   const Address field(obj, off, Address::times_1, 0*wordSize);
3088   NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3089 
3090   Label notByte, notBool, notInt, notShort, notChar,
3091         notLong, notFloat, notObj;
3092   Label Done;
3093 
3094   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3095 
3096   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3097 
3098   assert(btos == 0, "change code, btos != 0");
3099   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
3100   __ jcc(Assembler::notZero, notByte);
3101 
3102   // btos
3103   {
3104     __ pop(btos);
3105     if (!is_static) pop_and_check_object(obj);
3106     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg, noreg);
3107     if (!is_static && rc == may_rewrite) {
3108       patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3109     }
3110     __ jmp(Done);
3111   }
3112 
3113   __ bind(notByte);
3114   __ cmpl(flags, ztos);
3115   __ jcc(Assembler::notEqual, notBool);
3116 
3117   // ztos
3118   {
3119     __ pop(ztos);
3120     if (!is_static) pop_and_check_object(obj);
3121     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg, noreg);
3122     if (!is_static && rc == may_rewrite) {
3123       patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
3124     }
3125     __ jmp(Done);
3126   }
3127 
3128   __ bind(notBool);
3129   __ cmpl(flags, atos);
3130   __ jcc(Assembler::notEqual, notObj);
3131 
3132   // atos
3133   {
3134     __ pop(atos);
3135     if (!is_static) pop_and_check_object(obj);
3136     // Store into the field
3137     do_oop_store(_masm, field, rax);
3138     if (!is_static && rc == may_rewrite) {
3139       patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3140     }
3141     __ jmp(Done);
3142   }
3143 
3144   __ bind(notObj);
3145   __ cmpl(flags, itos);
3146   __ jcc(Assembler::notEqual, notInt);
3147 
3148   // itos
3149   {
3150     __ pop(itos);
3151     if (!is_static) pop_and_check_object(obj);
3152     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg, noreg);
3153     if (!is_static && rc == may_rewrite) {
3154       patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3155     }
3156     __ jmp(Done);
3157   }
3158 
3159   __ bind(notInt);
3160   __ cmpl(flags, ctos);
3161   __ jcc(Assembler::notEqual, notChar);
3162 
3163   // ctos
3164   {
3165     __ pop(ctos);
3166     if (!is_static) pop_and_check_object(obj);
3167     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg, noreg);
3168     if (!is_static && rc == may_rewrite) {
3169       patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3170     }
3171     __ jmp(Done);
3172   }
3173 
3174   __ bind(notChar);
3175   __ cmpl(flags, stos);
3176   __ jcc(Assembler::notEqual, notShort);
3177 
3178   // stos
3179   {
3180     __ pop(stos);
3181     if (!is_static) pop_and_check_object(obj);
3182     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg, noreg);
3183     if (!is_static && rc == may_rewrite) {
3184       patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3185     }
3186     __ jmp(Done);
3187   }
3188 
3189   __ bind(notShort);
3190   __ cmpl(flags, ltos);
3191   __ jcc(Assembler::notEqual, notLong);
3192 
3193   // ltos
3194   {
3195     __ pop(ltos);
3196     if (!is_static) pop_and_check_object(obj);
3197     // MO_RELAXED: generate atomic store for the case of volatile field (important for x86_32)
3198     __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos*/, noreg, noreg, noreg);
3199 #ifdef _LP64
3200     if (!is_static && rc == may_rewrite) {
3201       patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3202     }
3203 #endif // _LP64
3204     __ jmp(Done);
3205   }
3206 
3207   __ bind(notLong);
3208   __ cmpl(flags, ftos);
3209   __ jcc(Assembler::notEqual, notFloat);
3210 
3211   // ftos
3212   {
3213     __ pop(ftos);
3214     if (!is_static) pop_and_check_object(obj);
3215     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3216     if (!is_static && rc == may_rewrite) {
3217       patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3218     }
3219     __ jmp(Done);
3220   }
3221 
3222   __ bind(notFloat);
3223 #ifdef ASSERT
3224   Label notDouble;
3225   __ cmpl(flags, dtos);
3226   __ jcc(Assembler::notEqual, notDouble);
3227 #endif
3228 
3229   // dtos
3230   {
3231     __ pop(dtos);
3232     if (!is_static) pop_and_check_object(obj);
3233     // MO_RELAXED: for the case of volatile field, in fact it adds no extra work for the underlying implementation
3234     __ access_store_at(T_DOUBLE, IN_HEAP | MO_RELAXED, field, noreg /* dtos */, noreg, noreg, noreg);
3235     if (!is_static && rc == may_rewrite) {
3236       patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3237     }
3238   }
3239 
3240 #ifdef ASSERT
3241   __ jmp(Done);
3242 
3243   __ bind(notDouble);
3244   __ stop("Bad state");
3245 #endif
3246 
3247   __ bind(Done);
3248 }
3249 
3250 void TemplateTable::putfield(int byte_no) {
3251   putfield_or_static(byte_no, false);
3252 }
3253 
3254 void TemplateTable::nofast_putfield(int byte_no) {
3255   putfield_or_static(byte_no, false, may_not_rewrite);
3256 }
3257 
3258 void TemplateTable::putstatic(int byte_no) {
3259   putfield_or_static(byte_no, true);
3260 }
3261 
3262 void TemplateTable::jvmti_post_fast_field_mod() {
3263 
3264   const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3265 
3266   if (JvmtiExport::can_post_field_modification()) {
3267     // Check to see if a field modification watch has been set before
3268     // we take the time to call into the VM.
3269     Label L2;
3270     __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3271     __ testl(scratch, scratch);
3272     __ jcc(Assembler::zero, L2);
3273     __ pop_ptr(rbx);                  // copy the object pointer from tos
3274     __ verify_oop(rbx);
3275     __ push_ptr(rbx);                 // put the object pointer back on tos
3276     // Save tos values before call_VM() clobbers them. Since we have
3277     // to do it for every data type, we use the saved values as the
3278     // jvalue object.
3279     switch (bytecode()) {          // load values into the jvalue object
3280     case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3281     case Bytecodes::_fast_bputfield: // fall through
3282     case Bytecodes::_fast_zputfield: // fall through
3283     case Bytecodes::_fast_sputfield: // fall through
3284     case Bytecodes::_fast_cputfield: // fall through
3285     case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3286     case Bytecodes::_fast_dputfield: __ push(dtos); break;
3287     case Bytecodes::_fast_fputfield: __ push(ftos); break;
3288     case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3289 
3290     default:
3291       ShouldNotReachHere();
3292     }
3293     __ mov(scratch, rsp);             // points to jvalue on the stack
3294     // access constant pool cache entry
3295     LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1));
3296     NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1));
3297     __ verify_oop(rbx);
3298     // rbx: object pointer copied above
3299     // c_rarg2: cache entry pointer
3300     // c_rarg3: jvalue object on the stack
3301     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3302     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3303 
3304     switch (bytecode()) {             // restore tos values
3305     case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3306     case Bytecodes::_fast_bputfield: // fall through
3307     case Bytecodes::_fast_zputfield: // fall through
3308     case Bytecodes::_fast_sputfield: // fall through
3309     case Bytecodes::_fast_cputfield: // fall through
3310     case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3311     case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3312     case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3313     case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3314     default: break;
3315     }
3316     __ bind(L2);
3317   }
3318 }
3319 
3320 void TemplateTable::fast_storefield(TosState state) {
3321   transition(state, vtos);
3322 
3323   ByteSize base = ConstantPoolCache::base_offset();
3324 
3325   jvmti_post_fast_field_mod();
3326 
3327   // access constant pool cache
3328   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3329 
3330   // test for volatile with rdx but rdx is tos register for lputfield.
3331   __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3332                        in_bytes(base +
3333                                 ConstantPoolCacheEntry::flags_offset())));
3334 
3335   // replace index with field offset from cache entry
3336   __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3337                          in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3338 
3339   // [jk] not needed currently
3340   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3341   //                                              Assembler::StoreStore));
3342 
3343   Label notVolatile, Done;
3344   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3345   __ andl(rdx, 0x1);
3346 
3347   // Get object from stack
3348   pop_and_check_object(rcx);
3349 
3350   // field address
3351   const Address field(rcx, rbx, Address::times_1);
3352 
3353   // Check for volatile store
3354   __ testl(rdx, rdx);
3355   __ jcc(Assembler::zero, notVolatile);
3356 
3357   fast_storefield_helper(field, rax);
3358   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3359                                                Assembler::StoreStore));
3360   __ jmp(Done);
3361   __ bind(notVolatile);
3362 
3363   fast_storefield_helper(field, rax);
3364 
3365   __ bind(Done);
3366 }
3367 
3368 void TemplateTable::fast_storefield_helper(Address field, Register rax) {
3369 
3370   // access field
3371   switch (bytecode()) {
3372   case Bytecodes::_fast_aputfield:
3373     do_oop_store(_masm, field, rax);
3374     break;
3375   case Bytecodes::_fast_lputfield:
3376 #ifdef _LP64
3377     __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg, noreg);
3378 #else
3379   __ stop("should not be rewritten");
3380 #endif
3381     break;
3382   case Bytecodes::_fast_iputfield:
3383     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg, noreg);
3384     break;
3385   case Bytecodes::_fast_zputfield:
3386     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg, noreg);
3387     break;
3388   case Bytecodes::_fast_bputfield:
3389     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg, noreg);
3390     break;
3391   case Bytecodes::_fast_sputfield:
3392     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg, noreg);
3393     break;
3394   case Bytecodes::_fast_cputfield:
3395     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg, noreg);
3396     break;
3397   case Bytecodes::_fast_fputfield:
3398     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos*/, noreg, noreg, noreg);
3399     break;
3400   case Bytecodes::_fast_dputfield:
3401     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos*/, noreg, noreg, noreg);
3402     break;
3403   default:
3404     ShouldNotReachHere();
3405   }
3406 }
3407 
3408 void TemplateTable::fast_accessfield(TosState state) {
3409   transition(atos, state);
3410 
3411   // Do the JVMTI work here to avoid disturbing the register state below
3412   if (JvmtiExport::can_post_field_access()) {
3413     // Check to see if a field access watch has been set before we
3414     // take the time to call into the VM.
3415     Label L1;
3416     __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3417     __ testl(rcx, rcx);
3418     __ jcc(Assembler::zero, L1);
3419     // access constant pool cache entry
3420     LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1));
3421     NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1));
3422     __ verify_oop(rax);
3423     __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
3424     LP64_ONLY(__ mov(c_rarg1, rax));
3425     // c_rarg1: object pointer copied above
3426     // c_rarg2: cache entry pointer
3427     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
3428     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
3429     __ pop_ptr(rax); // restore object pointer
3430     __ bind(L1);
3431   }
3432 
3433   // access constant pool cache
3434   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3435   // replace index with field offset from cache entry
3436   // [jk] not needed currently
3437   // __ movl(rdx, Address(rcx, rbx, Address::times_8,
3438   //                      in_bytes(ConstantPoolCache::base_offset() +
3439   //                               ConstantPoolCacheEntry::flags_offset())));
3440   // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3441   // __ andl(rdx, 0x1);
3442   //
3443   __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3444                          in_bytes(ConstantPoolCache::base_offset() +
3445                                   ConstantPoolCacheEntry::f2_offset())));
3446 
3447   // rax: object
3448   __ verify_oop(rax);
3449   __ null_check(rax);
3450   Address field(rax, rbx, Address::times_1);
3451 
3452   // access field
3453   switch (bytecode()) {
3454   case Bytecodes::_fast_agetfield:
3455     do_oop_load(_masm, field, rax);
3456     __ verify_oop(rax);
3457     break;
3458   case Bytecodes::_fast_lgetfield:
3459 #ifdef _LP64
3460     __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
3461 #else
3462   __ stop("should not be rewritten");
3463 #endif
3464     break;
3465   case Bytecodes::_fast_igetfield:
3466     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3467     break;
3468   case Bytecodes::_fast_bgetfield:
3469     __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3470     break;
3471   case Bytecodes::_fast_sgetfield:
3472     __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3473     break;
3474   case Bytecodes::_fast_cgetfield:
3475     __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3476     break;
3477   case Bytecodes::_fast_fgetfield:
3478     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3479     break;
3480   case Bytecodes::_fast_dgetfield:
3481     __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3482     break;
3483   default:
3484     ShouldNotReachHere();
3485   }
3486   // [jk] not needed currently
3487   //   Label notVolatile;
3488   //   __ testl(rdx, rdx);
3489   //   __ jcc(Assembler::zero, notVolatile);
3490   //   __ membar(Assembler::LoadLoad);
3491   //   __ bind(notVolatile);
3492 }
3493 
3494 void TemplateTable::fast_xaccess(TosState state) {
3495   transition(vtos, state);
3496 
3497   // get receiver
3498   __ movptr(rax, aaddress(0));
3499   // access constant pool cache
3500   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
3501   __ movptr(rbx,
3502             Address(rcx, rdx, Address::times_ptr,
3503                     in_bytes(ConstantPoolCache::base_offset() +
3504                              ConstantPoolCacheEntry::f2_offset())));
3505   // make sure exception is reported in correct bcp range (getfield is
3506   // next instruction)
3507   __ increment(rbcp);
3508   __ null_check(rax);
3509   const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
3510   switch (state) {
3511   case itos:
3512     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3513     break;
3514   case atos:
3515     do_oop_load(_masm, field, rax);
3516     __ verify_oop(rax);
3517     break;
3518   case ftos:
3519     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3520     break;
3521   default:
3522     ShouldNotReachHere();
3523   }
3524 
3525   // [jk] not needed currently
3526   // Label notVolatile;
3527   // __ movl(rdx, Address(rcx, rdx, Address::times_8,
3528   //                      in_bytes(ConstantPoolCache::base_offset() +
3529   //                               ConstantPoolCacheEntry::flags_offset())));
3530   // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3531   // __ testl(rdx, 0x1);
3532   // __ jcc(Assembler::zero, notVolatile);
3533   // __ membar(Assembler::LoadLoad);
3534   // __ bind(notVolatile);
3535 
3536   __ decrement(rbcp);
3537 }
3538 
3539 //-----------------------------------------------------------------------------
3540 // Calls
3541 
3542 void TemplateTable::prepare_invoke(int byte_no,
3543                                    Register method,  // linked method (or i-klass)
3544                                    Register index,   // itable index, MethodType, etc.
3545                                    Register recv,    // if caller wants to see it
3546                                    Register flags    // if caller wants to test it
3547                                    ) {
3548   // determine flags
3549   const Bytecodes::Code code = bytecode();
3550   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3551   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3552   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3553   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3554   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3555   const bool load_receiver       = (recv  != noreg);
3556   const bool save_flags          = (flags != noreg);
3557   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3558   assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3559   assert(flags == noreg || flags == rdx, "");
3560   assert(recv  == noreg || recv  == rcx, "");
3561 
3562   // setup registers & access constant pool cache
3563   if (recv  == noreg)  recv  = rcx;
3564   if (flags == noreg)  flags = rdx;
3565   assert_different_registers(method, index, recv, flags);
3566 
3567   // save 'interpreter return address'
3568   __ save_bcp();
3569 
3570   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3571 
3572   // maybe push appendix to arguments (just before return address)
3573   if (is_invokedynamic || is_invokehandle) {
3574     Label L_no_push;
3575     __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
3576     __ jcc(Assembler::zero, L_no_push);
3577     // Push the appendix as a trailing parameter.
3578     // This must be done before we get the receiver,
3579     // since the parameter_size includes it.
3580     __ push(rbx);
3581     __ mov(rbx, index);
3582     __ load_resolved_reference_at_index(index, rbx);
3583     __ pop(rbx);
3584     __ push(index);  // push appendix (MethodType, CallSite, etc.)
3585     __ bind(L_no_push);
3586   }
3587 
3588   // load receiver if needed (after appendix is pushed so parameter size is correct)
3589   // Note: no return address pushed yet
3590   if (load_receiver) {
3591     __ movl(recv, flags);
3592     __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
3593     const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
3594     const int receiver_is_at_end      = -1;  // back off one slot to get receiver
3595     Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3596     __ movptr(recv, recv_addr);
3597     __ verify_oop(recv);
3598   }
3599 
3600   if (save_flags) {
3601     __ movl(rbcp, flags);
3602   }
3603 
3604   // compute return type
3605   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3606   // Make sure we don't need to mask flags after the above shift
3607   ConstantPoolCacheEntry::verify_tos_state_shift();
3608   // load return address
3609   {
3610     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3611     ExternalAddress table(table_addr);
3612 #ifdef _LP64
3613     __ lea(rscratch1, table);
3614     __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
3615 #else
3616     __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
3617 #endif // _LP64
3618   }
3619 
3620   // push return address
3621   __ push(flags);
3622 
3623   // Restore flags value from the constant pool cache, and restore rsi
3624   // for later null checks.  r13 is the bytecode pointer
3625   if (save_flags) {
3626     __ movl(flags, rbcp);
3627     __ restore_bcp();
3628   }
3629 }
3630 
3631 void TemplateTable::invokevirtual_helper(Register index,
3632                                          Register recv,
3633                                          Register flags) {
3634   // Uses temporary registers rax, rdx
3635   assert_different_registers(index, recv, rax, rdx);
3636   assert(index == rbx, "");
3637   assert(recv  == rcx, "");
3638 
3639   // Test for an invoke of a final method
3640   Label notFinal;
3641   __ movl(rax, flags);
3642   __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3643   __ jcc(Assembler::zero, notFinal);
3644 
3645   const Register method = index;  // method must be rbx
3646   assert(method == rbx,
3647          "Method* must be rbx for interpreter calling convention");
3648 
3649   // do the call - the index is actually the method to call
3650   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3651 
3652   // It's final, need a null check here!
3653   __ null_check(recv);
3654 
3655   // profile this call
3656   __ profile_final_call(rax);
3657   __ profile_arguments_type(rax, method, rbcp, true);
3658 
3659   __ jump_from_interpreted(method, rax);
3660 
3661   __ bind(notFinal);
3662 
3663   // get receiver klass
3664   __ load_klass_check_null(rax, recv, rscratch1);
3665 
3666   // profile this call
3667   __ profile_virtual_call(rax, rlocals, rdx);
3668   // get target Method* & entry point
3669   __ lookup_virtual_method(rax, index, method);
3670 
3671   __ profile_arguments_type(rdx, method, rbcp, true);
3672   __ jump_from_interpreted(method, rdx);
3673 }
3674 
3675 void TemplateTable::invokevirtual(int byte_no) {
3676   transition(vtos, vtos);
3677   assert(byte_no == f2_byte, "use this argument");
3678   prepare_invoke(byte_no,
3679                  rbx,    // method or vtable index
3680                  noreg,  // unused itable index
3681                  rcx, rdx); // recv, flags
3682 
3683   // rbx: index
3684   // rcx: receiver
3685   // rdx: flags
3686 
3687   invokevirtual_helper(rbx, rcx, rdx);
3688 }
3689 
3690 void TemplateTable::invokespecial(int byte_no) {
3691   transition(vtos, vtos);
3692   assert(byte_no == f1_byte, "use this argument");
3693   prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
3694                  rcx);  // get receiver also for null check
3695   __ verify_oop(rcx);
3696   __ null_check(rcx);
3697   // do the call
3698   __ profile_call(rax);
3699   __ profile_arguments_type(rax, rbx, rbcp, false);
3700   __ jump_from_interpreted(rbx, rax);
3701 }
3702 
3703 void TemplateTable::invokestatic(int byte_no) {
3704   transition(vtos, vtos);
3705   assert(byte_no == f1_byte, "use this argument");
3706   prepare_invoke(byte_no, rbx);  // get f1 Method*
3707   // do the call
3708   __ profile_call(rax);
3709   __ profile_arguments_type(rax, rbx, rbcp, false);
3710   __ jump_from_interpreted(rbx, rax);
3711 }
3712 
3713 
3714 void TemplateTable::fast_invokevfinal(int byte_no) {
3715   transition(vtos, vtos);
3716   assert(byte_no == f2_byte, "use this argument");
3717   __ stop("fast_invokevfinal not used on x86");
3718 }
3719 
3720 
3721 void TemplateTable::invokeinterface(int byte_no) {
3722   transition(vtos, vtos);
3723   assert(byte_no == f1_byte, "use this argument");
3724   prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 Method*
3725                  rcx, rdx); // recv, flags
3726 
3727   // rax: reference klass (from f1) if interface method
3728   // rbx: method (from f2)
3729   // rcx: receiver
3730   // rdx: flags
3731 
3732   // First check for Object case, then private interface method,
3733   // then regular interface method.
3734 
3735   // Special case of invokeinterface called for virtual method of
3736   // java.lang.Object.  See cpCache.cpp for details.
3737   Label notObjectMethod;
3738   __ movl(rlocals, rdx);
3739   __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3740   __ jcc(Assembler::zero, notObjectMethod);
3741   invokevirtual_helper(rbx, rcx, rdx);
3742   // no return from above
3743   __ bind(notObjectMethod);
3744 
3745   Label no_such_interface; // for receiver subtype check
3746   Register recvKlass; // used for exception processing
3747 
3748   // Check for private method invocation - indicated by vfinal
3749   Label notVFinal;
3750   __ movl(rlocals, rdx);
3751   __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3752   __ jcc(Assembler::zero, notVFinal);
3753 
3754   // Get receiver klass into rlocals - also a null check
3755   __ load_klass_check_null(rlocals, rcx, rscratch1);
3756 
3757   Label subtype;
3758   __ check_klass_subtype(rlocals, rax, rbcp, subtype);
3759   // If we get here the typecheck failed
3760   recvKlass = rdx;
3761   __ mov(recvKlass, rlocals); // shuffle receiver class for exception use
3762   __ jmp(no_such_interface);
3763 
3764   __ bind(subtype);
3765 
3766   // do the call - rbx is actually the method to call
3767 
3768   __ profile_final_call(rdx);
3769   __ profile_arguments_type(rdx, rbx, rbcp, true);
3770 
3771   __ jump_from_interpreted(rbx, rdx);
3772   // no return from above
3773   __ bind(notVFinal);
3774 
3775   // Get receiver klass into rdx - also a null check
3776   __ restore_locals();  // restore r14
3777   __ load_klass_check_null(rdx, rcx, rscratch1);
3778 
3779   Label no_such_method;
3780 
3781   // Preserve method for throw_AbstractMethodErrorVerbose.
3782   __ mov(rcx, rbx);
3783   // Receiver subtype check against REFC.
3784   // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
3785   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3786                              rdx, rax, noreg,
3787                              // outputs: scan temp. reg, scan temp. reg
3788                              rbcp, rlocals,
3789                              no_such_interface,
3790                              /*return_method=*/false);
3791 
3792   // profile this call
3793   __ restore_bcp(); // rbcp was destroyed by receiver type check
3794   __ profile_virtual_call(rdx, rbcp, rlocals);
3795 
3796   // Get declaring interface class from method, and itable index
3797   __ load_method_holder(rax, rbx);
3798   __ movl(rbx, Address(rbx, Method::itable_index_offset()));
3799   __ subl(rbx, Method::itable_index_max);
3800   __ negl(rbx);
3801 
3802   // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3803   __ mov(rlocals, rdx);
3804   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3805                              rlocals, rax, rbx,
3806                              // outputs: method, scan temp. reg
3807                              rbx, rbcp,
3808                              no_such_interface);
3809 
3810   // rbx: Method* to call
3811   // rcx: receiver
3812   // Check for abstract method error
3813   // Note: This should be done more efficiently via a throw_abstract_method_error
3814   //       interpreter entry point and a conditional jump to it in case of a null
3815   //       method.
3816   __ testptr(rbx, rbx);
3817   __ jcc(Assembler::zero, no_such_method);
3818 
3819   __ profile_arguments_type(rdx, rbx, rbcp, true);
3820 
3821   // do the call
3822   // rcx: receiver
3823   // rbx,: Method*
3824   __ jump_from_interpreted(rbx, rdx);
3825   __ should_not_reach_here();
3826 
3827   // exception handling code follows...
3828   // note: must restore interpreter registers to canonical
3829   //       state for exception handling to work correctly!
3830 
3831   __ bind(no_such_method);
3832   // throw exception
3833   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
3834   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
3835   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3836   // Pass arguments for generating a verbose error message.
3837 #ifdef _LP64
3838   recvKlass = c_rarg1;
3839   Register method    = c_rarg2;
3840   if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
3841   if (method != rcx)    { __ movq(method, rcx);    }
3842 #else
3843   recvKlass = rdx;
3844   Register method    = rcx;
3845 #endif
3846   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
3847              recvKlass, method);
3848   // The call_VM checks for exception, so we should never return here.
3849   __ should_not_reach_here();
3850 
3851   __ bind(no_such_interface);
3852   // throw exception
3853   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
3854   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
3855   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3856   // Pass arguments for generating a verbose error message.
3857   LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
3858   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
3859              recvKlass, rax);
3860   // the call_VM checks for exception, so we should never return here.
3861   __ should_not_reach_here();
3862 }
3863 
3864 void TemplateTable::invokehandle(int byte_no) {
3865   transition(vtos, vtos);
3866   assert(byte_no == f1_byte, "use this argument");
3867   const Register rbx_method = rbx;
3868   const Register rax_mtype  = rax;
3869   const Register rcx_recv   = rcx;
3870   const Register rdx_flags  = rdx;
3871 
3872   prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3873   __ verify_method_ptr(rbx_method);
3874   __ verify_oop(rcx_recv);
3875   __ null_check(rcx_recv);
3876 
3877   // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3878   // rbx: MH.invokeExact_MT method (from f2)
3879 
3880   // Note:  rax_mtype is already pushed (if necessary) by prepare_invoke
3881 
3882   // FIXME: profile the LambdaForm also
3883   __ profile_final_call(rax);
3884   __ profile_arguments_type(rdx, rbx_method, rbcp, true);
3885 
3886   __ jump_from_interpreted(rbx_method, rdx);
3887 }
3888 
3889 void TemplateTable::invokedynamic(int byte_no) {
3890   transition(vtos, vtos);
3891   assert(byte_no == f1_byte, "use this argument");
3892 
3893   const Register rbx_method   = rbx;
3894   const Register rax_callsite = rax;
3895 
3896   prepare_invoke(byte_no, rbx_method, rax_callsite);
3897 
3898   // rax: CallSite object (from cpool->resolved_references[f1])
3899   // rbx: MH.linkToCallSite method (from f2)
3900 
3901   // Note:  rax_callsite is already pushed by prepare_invoke
3902 
3903   // %%% should make a type profile for any invokedynamic that takes a ref argument
3904   // profile this call
3905   __ profile_call(rbcp);
3906   __ profile_arguments_type(rdx, rbx_method, rbcp, false);
3907 
3908   __ verify_oop(rax_callsite);
3909 
3910   __ jump_from_interpreted(rbx_method, rdx);
3911 }
3912 
3913 //-----------------------------------------------------------------------------
3914 // Allocation
3915 
3916 void TemplateTable::_new() {
3917   transition(vtos, atos);
3918   __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3919   Label slow_case;
3920   Label slow_case_no_pop;
3921   Label done;
3922   Label initialize_header;
3923 
3924   __ get_cpool_and_tags(rcx, rax);
3925 
3926   // Make sure the class we're about to instantiate has been resolved.
3927   // This is done before loading InstanceKlass to be consistent with the order
3928   // how Constant Pool is updated (see ConstantPool::klass_at_put)
3929   const int tags_offset = Array<u1>::base_offset_in_bytes();
3930   __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3931   __ jcc(Assembler::notEqual, slow_case_no_pop);
3932 
3933   // get InstanceKlass
3934   __ load_resolved_klass_at_index(rcx, rcx, rdx);
3935   __ push(rcx);  // save the contexts of klass for initializing the header
3936 
3937   // make sure klass is initialized & doesn't have finalizer
3938   // make sure klass is fully initialized
3939   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3940   __ jcc(Assembler::notEqual, slow_case);
3941 
3942   // get instance_size in InstanceKlass (scaled to a count of bytes)
3943   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3944   // test to see if it has a finalizer or is malformed in some way
3945   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3946   __ jcc(Assembler::notZero, slow_case);
3947 
3948   // Allocate the instance:
3949   //  If TLAB is enabled:
3950   //    Try to allocate in the TLAB.
3951   //    If fails, go to the slow path.
3952   //    Initialize the allocation.
3953   //    Exit.
3954   //
3955   //  Go to slow path.
3956 
3957   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
3958 
3959   if (UseTLAB) {
3960     NOT_LP64(__ get_thread(thread);)
3961     __ tlab_allocate(thread, rax, rdx, 0, rcx, rbx, slow_case);
3962     if (ZeroTLAB) {
3963       // the fields have been already cleared
3964       __ jmp(initialize_header);
3965     }
3966 
3967     // The object is initialized before the header.  If the object size is
3968     // zero, go directly to the header initialization.
3969     __ decrement(rdx, sizeof(oopDesc));
3970     __ jcc(Assembler::zero, initialize_header);
3971 
3972     // Initialize topmost object field, divide rdx by 8, check if odd and
3973     // test if zero.
3974     __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
3975     __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3976 
3977     // rdx must have been multiple of 8
3978 #ifdef ASSERT
3979     // make sure rdx was multiple of 8
3980     Label L;
3981     // Ignore partial flag stall after shrl() since it is debug VM
3982     __ jcc(Assembler::carryClear, L);
3983     __ stop("object size is not multiple of 2 - adjust this code");
3984     __ bind(L);
3985     // rdx must be > 0, no extra check needed here
3986 #endif
3987 
3988     // initialize remaining object fields: rdx was a multiple of 8
3989     { Label loop;
3990     __ bind(loop);
3991     __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3992     NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3993     __ decrement(rdx);
3994     __ jcc(Assembler::notZero, loop);
3995     }
3996 
3997     // initialize object header only.
3998     __ bind(initialize_header);
3999     __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
4000               (intptr_t)markWord::prototype().value()); // header
4001     __ pop(rcx);   // get saved klass back in the register.
4002 #ifdef _LP64
4003     __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
4004     __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
4005 #endif
4006     __ store_klass(rax, rcx, rscratch1);  // klass
4007 
4008     {
4009       SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0, rscratch1);
4010       // Trigger dtrace event for fastpath
4011       __ push(atos);
4012       __ call_VM_leaf(
4013            CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), rax);
4014       __ pop(atos);
4015     }
4016 
4017     __ jmp(done);
4018   }
4019 
4020   // slow case
4021   __ bind(slow_case);
4022   __ pop(rcx);   // restore stack pointer to what it was when we came in.
4023   __ bind(slow_case_no_pop);
4024 
4025   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4026   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4027 
4028   __ get_constant_pool(rarg1);
4029   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4030   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
4031    __ verify_oop(rax);
4032 
4033   // continue
4034   __ bind(done);
4035 }
4036 
4037 void TemplateTable::newarray() {
4038   transition(itos, atos);
4039   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4040   __ load_unsigned_byte(rarg1, at_bcp(1));
4041   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
4042           rarg1, rax);
4043 }
4044 
4045 void TemplateTable::anewarray() {
4046   transition(itos, atos);
4047 
4048   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4049   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4050 
4051   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4052   __ get_constant_pool(rarg1);
4053   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
4054           rarg1, rarg2, rax);
4055 }
4056 
4057 void TemplateTable::arraylength() {
4058   transition(atos, itos);
4059   __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
4060   __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
4061 }
4062 
4063 void TemplateTable::checkcast() {
4064   transition(atos, atos);
4065   Label done, is_null, ok_is_subtype, quicked, resolved;
4066   __ testptr(rax, rax); // object is in rax
4067   __ jcc(Assembler::zero, is_null);
4068 
4069   // Get cpool & tags index
4070   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4071   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4072   // See if bytecode has already been quicked
4073   __ cmpb(Address(rdx, rbx,
4074                   Address::times_1,
4075                   Array<u1>::base_offset_in_bytes()),
4076           JVM_CONSTANT_Class);
4077   __ jcc(Assembler::equal, quicked);
4078   __ push(atos); // save receiver for result, and for GC
4079   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4080 
4081   // vm_result_2 has metadata result
4082 #ifndef _LP64
4083   // borrow rdi from locals
4084   __ get_thread(rdi);
4085   __ get_vm_result_2(rax, rdi);
4086   __ restore_locals();
4087 #else
4088   __ get_vm_result_2(rax, r15_thread);
4089 #endif
4090 
4091   __ pop_ptr(rdx); // restore receiver
4092   __ jmpb(resolved);
4093 
4094   // Get superklass in rax and subklass in rbx
4095   __ bind(quicked);
4096   __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
4097   __ load_resolved_klass_at_index(rax, rcx, rbx);
4098 
4099   __ bind(resolved);
4100   __ load_klass(rbx, rdx, rscratch1);
4101 
4102   // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
4103   // Superklass in rax.  Subklass in rbx.
4104   __ gen_subtype_check(rbx, ok_is_subtype);
4105 
4106   // Come here on failure
4107   __ push_ptr(rdx);
4108   // object is at TOS
4109   __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
4110 
4111   // Come here on success
4112   __ bind(ok_is_subtype);
4113   __ mov(rax, rdx); // Restore object in rdx
4114 
4115   // Collect counts on whether this check-cast sees NULLs a lot or not.
4116   if (ProfileInterpreter) {
4117     __ jmp(done);
4118     __ bind(is_null);
4119     __ profile_null_seen(rcx);
4120   } else {
4121     __ bind(is_null);   // same as 'done'
4122   }
4123   __ bind(done);
4124 }
4125 
4126 void TemplateTable::instanceof() {
4127   transition(atos, itos);
4128   Label done, is_null, ok_is_subtype, quicked, resolved;
4129   __ testptr(rax, rax);
4130   __ jcc(Assembler::zero, is_null);
4131 
4132   // Get cpool & tags index
4133   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4134   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4135   // See if bytecode has already been quicked
4136   __ cmpb(Address(rdx, rbx,
4137                   Address::times_1,
4138                   Array<u1>::base_offset_in_bytes()),
4139           JVM_CONSTANT_Class);
4140   __ jcc(Assembler::equal, quicked);
4141 
4142   __ push(atos); // save receiver for result, and for GC
4143   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4144   // vm_result_2 has metadata result
4145 
4146 #ifndef _LP64
4147   // borrow rdi from locals
4148   __ get_thread(rdi);
4149   __ get_vm_result_2(rax, rdi);
4150   __ restore_locals();
4151 #else
4152   __ get_vm_result_2(rax, r15_thread);
4153 #endif
4154 
4155   __ pop_ptr(rdx); // restore receiver
4156   __ verify_oop(rdx);
4157   __ load_klass(rdx, rdx, rscratch1);
4158   __ jmpb(resolved);
4159 
4160   // Get superklass in rax and subklass in rdx
4161   __ bind(quicked);
4162   __ load_klass(rdx, rax, rscratch1);
4163   __ load_resolved_klass_at_index(rax, rcx, rbx);
4164 
4165   __ bind(resolved);
4166 
4167   // Generate subtype check.  Blows rcx, rdi
4168   // Superklass in rax.  Subklass in rdx.
4169   __ gen_subtype_check(rdx, ok_is_subtype);
4170 
4171   // Come here on failure
4172   __ xorl(rax, rax);
4173   __ jmpb(done);
4174   // Come here on success
4175   __ bind(ok_is_subtype);
4176   __ movl(rax, 1);
4177 
4178   // Collect counts on whether this test sees NULLs a lot or not.
4179   if (ProfileInterpreter) {
4180     __ jmp(done);
4181     __ bind(is_null);
4182     __ profile_null_seen(rcx);
4183   } else {
4184     __ bind(is_null);   // same as 'done'
4185   }
4186   __ bind(done);
4187   // rax = 0: obj == NULL or  obj is not an instanceof the specified klass
4188   // rax = 1: obj != NULL and obj is     an instanceof the specified klass
4189 }
4190 
4191 
4192 //----------------------------------------------------------------------------------------------------
4193 // Breakpoints
4194 void TemplateTable::_breakpoint() {
4195   // Note: We get here even if we are single stepping..
4196   // jbug insists on setting breakpoints at every bytecode
4197   // even if we are in single step mode.
4198 
4199   transition(vtos, vtos);
4200 
4201   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4202 
4203   // get the unpatched byte code
4204   __ get_method(rarg);
4205   __ call_VM(noreg,
4206              CAST_FROM_FN_PTR(address,
4207                               InterpreterRuntime::get_original_bytecode_at),
4208              rarg, rbcp);
4209   __ mov(rbx, rax);  // why?
4210 
4211   // post the breakpoint event
4212   __ get_method(rarg);
4213   __ call_VM(noreg,
4214              CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4215              rarg, rbcp);
4216 
4217   // complete the execution of original bytecode
4218   __ dispatch_only_normal(vtos);
4219 }
4220 
4221 //-----------------------------------------------------------------------------
4222 // Exceptions
4223 
4224 void TemplateTable::athrow() {
4225   transition(atos, vtos);
4226   __ null_check(rax);
4227   __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
4228 }
4229 
4230 //-----------------------------------------------------------------------------
4231 // Synchronization
4232 //
4233 // Note: monitorenter & exit are symmetric routines; which is reflected
4234 //       in the assembly code structure as well
4235 //
4236 // Stack layout:
4237 //
4238 // [expressions  ] <--- rsp               = expression stack top
4239 // ..
4240 // [expressions  ]
4241 // [monitor entry] <--- monitor block top = expression stack bot
4242 // ..
4243 // [monitor entry]
4244 // [frame data   ] <--- monitor block bot
4245 // ...
4246 // [saved rbp    ] <--- rbp
4247 void TemplateTable::monitorenter() {
4248   transition(atos, vtos);
4249 
4250   // check for NULL object
4251   __ null_check(rax);
4252 
4253   const Address monitor_block_top(
4254         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4255   const Address monitor_block_bot(
4256         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4257   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4258 
4259   Label allocated;
4260 
4261   Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
4262   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4263   Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4264 
4265   // initialize entry pointer
4266   __ xorl(rmon, rmon); // points to free slot or NULL
4267 
4268   // find a free slot in the monitor block (result in rmon)
4269   {
4270     Label entry, loop, exit;
4271     __ movptr(rtop, monitor_block_top); // points to current entry,
4272                                         // starting with top-most entry
4273     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4274                                         // of monitor block
4275     __ jmpb(entry);
4276 
4277     __ bind(loop);
4278     // check if current entry is used
4279     __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
4280     // if not used then remember entry in rmon
4281     __ cmovptr(Assembler::equal, rmon, rtop);   // cmov => cmovptr
4282     // check if current entry is for same object
4283     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4284     // if same object then stop searching
4285     __ jccb(Assembler::equal, exit);
4286     // otherwise advance to next entry
4287     __ addptr(rtop, entry_size);
4288     __ bind(entry);
4289     // check if bottom reached
4290     __ cmpptr(rtop, rbot);
4291     // if not at bottom then check this entry
4292     __ jcc(Assembler::notEqual, loop);
4293     __ bind(exit);
4294   }
4295 
4296   __ testptr(rmon, rmon); // check if a slot has been found
4297   __ jcc(Assembler::notZero, allocated); // if found, continue with that one
4298 
4299   // allocate one if there's no free slot
4300   {
4301     Label entry, loop;
4302     // 1. compute new pointers          // rsp: old expression stack top
4303     __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
4304     __ subptr(rsp, entry_size);         // move expression stack top
4305     __ subptr(rmon, entry_size);        // move expression stack bottom
4306     __ mov(rtop, rsp);                  // set start value for copy loop
4307     __ movptr(monitor_block_bot, rmon); // set new monitor block bottom
4308     __ jmp(entry);
4309     // 2. move expression stack contents
4310     __ bind(loop);
4311     __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
4312                                                 // word from old location
4313     __ movptr(Address(rtop, 0), rbot);          // and store it at new location
4314     __ addptr(rtop, wordSize);                  // advance to next word
4315     __ bind(entry);
4316     __ cmpptr(rtop, rmon);                      // check if bottom reached
4317     __ jcc(Assembler::notEqual, loop);          // if not at bottom then
4318                                                 // copy next word
4319   }
4320 
4321   // call run-time routine
4322   // rmon: points to monitor entry
4323   __ bind(allocated);
4324 
4325   // Increment bcp to point to the next bytecode, so exception
4326   // handling for async. exceptions work correctly.
4327   // The object has already been popped from the stack, so the
4328   // expression stack looks correct.
4329   __ increment(rbcp);
4330 
4331   // store object
4332   __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
4333   __ lock_object(rmon);
4334 
4335   // check to make sure this monitor doesn't cause stack overflow after locking
4336   __ save_bcp();  // in case of exception
4337   __ generate_stack_overflow_check(0);
4338 
4339   // The bcp has already been incremented. Just need to dispatch to
4340   // next instruction.
4341   __ dispatch_next(vtos);
4342 }
4343 
4344 void TemplateTable::monitorexit() {
4345   transition(atos, vtos);
4346 
4347   // check for NULL object
4348   __ null_check(rax);
4349 
4350   const Address monitor_block_top(
4351         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4352   const Address monitor_block_bot(
4353         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4354   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4355 
4356   Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4357   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4358 
4359   Label found;
4360 
4361   // find matching slot
4362   {
4363     Label entry, loop;
4364     __ movptr(rtop, monitor_block_top); // points to current entry,
4365                                         // starting with top-most entry
4366     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4367                                         // of monitor block
4368     __ jmpb(entry);
4369 
4370     __ bind(loop);
4371     // check if current entry is for same object
4372     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4373     // if same object then stop searching
4374     __ jcc(Assembler::equal, found);
4375     // otherwise advance to next entry
4376     __ addptr(rtop, entry_size);
4377     __ bind(entry);
4378     // check if bottom reached
4379     __ cmpptr(rtop, rbot);
4380     // if not at bottom then check this entry
4381     __ jcc(Assembler::notEqual, loop);
4382   }
4383 
4384   // error handling. Unlocking was not block-structured
4385   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4386                    InterpreterRuntime::throw_illegal_monitor_state_exception));
4387   __ should_not_reach_here();
4388 
4389   // call run-time routine
4390   __ bind(found);
4391   __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
4392   __ unlock_object(rtop);
4393   __ pop_ptr(rax); // discard object
4394 }
4395 
4396 // Wide instructions
4397 void TemplateTable::wide() {
4398   transition(vtos, vtos);
4399   __ load_unsigned_byte(rbx, at_bcp(1));
4400   ExternalAddress wtable((address)Interpreter::_wentry_point);
4401   __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)), rscratch1);
4402   // Note: the rbcp increment step is part of the individual wide bytecode implementations
4403 }
4404 
4405 // Multi arrays
4406 void TemplateTable::multianewarray() {
4407   transition(vtos, atos);
4408 
4409   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4410   __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
4411   // last dim is on top of stack; we want address of first one:
4412   // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize
4413   // the latter wordSize to point to the beginning of the array.
4414   __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
4415   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
4416   __ load_unsigned_byte(rbx, at_bcp(3));
4417   __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
4418 }