/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
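
// The '__' shorthand routes each generated instruction through
// Disassembler::hook, which records the C++ file/line that emitted it so
// disassembly listings can be cross-referenced with this source; the hook
// returns _masm, so '__ movl(...)' is ultimately '_masm->movl(...)'.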

// Global Register Names
static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);

// Address Computation: local variables
static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

#ifndef _LP64
static inline Address haddress(int n) {
  return iaddress(n + 0);
}
#endif

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::times_ptr);
}

static inline Address laddress(Register r) {
  return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
}

#ifndef _LP64
static inline Address haddress(Register r)       {
  return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}
#endif

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}


// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp   () {
  return Address(rsp, 0);
}

// At top of Java expression stack which may be different than esp().  It
// isn't for category 1 objects.
static inline Address at_tos   () {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
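
// Example of how templates use j_not: a conditional-branch template
// typically jumps around the taken path on the negated condition, roughly
//
//   Label not_taken;
//   __ jcc(j_not(cc), not_taken);   // fall through when cc holds
//   /* ... taken-branch work ... */
//   __ bind(not_taken);
//
// so only the not-taken path needs an explicit label.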



// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by dst.
// If val == noreg this means store a NULL


static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators = 0) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  __ store_heap_oop(dst, val, rdx, rbx, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators = 0) {
  __ load_heap_oop(dst, src, rdx, rbx, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
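    // The short jump (jmpb) below is only safe in product builds; debug
    // builds insert the bytecode-verification code further down, which can
    // push L_patch_done beyond jmpb's 8-bit displacement range.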
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}
// Individual instructions


void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
#ifndef _LP64
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
#endif
}



void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  if (UseSSE >= 1) {
    static float one = 1.0f, two = 2.0f;
    switch (value) {
    case 0:
      __ xorps(xmm0, xmm0);
      break;
    case 1:
      __ movflt(xmm0, ExternalAddress((address) &one));
      break;
    case 2:
      __ movflt(xmm0, ExternalAddress((address) &two));
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
           if (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
    } else                 { ShouldNotReachHere();
    }
#endif // _LP64
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  if (UseSSE >= 2) {
    static double one = 1.0;
    switch (value) {
    case 0:
      __ xorpd(xmm0, xmm0);
      break;
    case 1:
      __ movdbl(xmm0, ExternalAddress((address) &one));
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
           if (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else                 { ShouldNotReachHere();
    }
#endif
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
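
// A worked example of the sipush decode above: the two operand bytes at
// bcp+1 are big-endian, e.g. 0xFF 0xFE for the value -2.
// load_unsigned_short reads them little-endian, giving rax = 0x0000FEFF;
// bswapl reverses the four bytes to 0xFFFE0000; and the arithmetic shift
// right by 16 sign-extends, leaving rax = 0xFFFFFFFE = -2.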

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);

  __ movl(rarg, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);

  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);

  // ftos
  __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jccb(Assembler::notEqual, notInt);

  // itos
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ jmp(Done);

  // assume the tag is for condy; if not, the VM runtime will tell us
  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testptr(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);
  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;
    ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
    __ movptr(tmp, null_sentinel);
    __ resolve_oop_handle(tmp);
    __ cmpoop(tmp, result);
    __ jccb(Assembler::notEqual, notNull);
    __ xorptr(result, result);  // NULL object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
  __ cmpl(rdx, JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, notDouble);

  // dtos
  __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);

  __ jmp(Done);
  __ bind(notDouble);
  __ cmpl(rdx, JVM_CONSTANT_Long);
  __ jccb(Assembler::notEqual, notLong);

  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
  __ push(ltos);
  __ jmp(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done) {
  const Register obj = rax;
  const Register off = rbx;
  const Register flags = rcx;
  const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  __ movl(rarg, (int)bytecode());
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(flags, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(flags, r15_thread);
#endif
  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ movl(off, flags);
  __ andl(off, ConstantPoolCacheEntry::field_index_mask);
  const Address field(obj, off, Address::times_1, 0*wordSize);

  // What sort of thing are we loading?
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpl(flags, itos);
      __ jcc(Assembler::notEqual, notInt);
      // itos
      __ movl(rax, field);
      __ push(itos);
      __ jmp(Done);

      __ bind(notInt);
      __ cmpl(flags, ftos);
      __ jcc(Assembler::notEqual, notFloat);
      // ftos
      __ load_float(field);
      __ push(ftos);
      __ jmp(Done);

      __ bind(notFloat);
      __ cmpl(flags, stos);
      __ jcc(Assembler::notEqual, notShort);
      // stos
      __ load_signed_short(rax, field);
      __ push(stos);
      __ jmp(Done);

      __ bind(notShort);
      __ cmpl(flags, btos);
      __ jcc(Assembler::notEqual, notByte);
      // btos
      __ load_signed_byte(rax, field);
      __ push(btos);
      __ jmp(Done);

      __ bind(notByte);
      __ cmpl(flags, ctos);
      __ jcc(Assembler::notEqual, notChar);
      // ctos
      __ load_unsigned_short(rax, field);
      __ push(ctos);
      __ jmp(Done);

      __ bind(notChar);
      __ cmpl(flags, ztos);
      __ jcc(Assembler::notEqual, notBool);
      // ztos
      __ load_signed_byte(rax, field);
      __ push(ztos);
      __ jmp(Done);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmpl(flags, ltos);
      __ jcc(Assembler::notEqual, notLong);
      // ltos
      // Loading high word first because movptr clobbers rax
      NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
      __ movptr(rax, field);
      __ push(ltos);
      __ jmp(Done);

      __ bind(notLong);
      __ cmpl(flags, dtos);
      __ jcc(Assembler::notEqual, notDouble);
      // dtos
      __ load_double(field);
      __ push(dtos);
      __ jmp(Done);

      __ bind(notDouble);
      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
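
// Locals are laid out at decreasing addresses below rlocals, so the index
// read from the bytecode stream is negated here; iaddress(Register) then
// scales the (negative) index by times_ptr to reach the wanted slot.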

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
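    // For example, in the sequence "iload a; iload b; iadd": executing
    // "iload a" sees _iload next and waits; executing "iload b" sees iadd
    // next and rewrites itself to _fast_iload; the next execution of
    // "iload a" then sees _fast_iload and rewrites itself to _fast_iload2,
    // which performs both loads at once.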
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);

    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}
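
// Same big-endian decode as sipush, but with a logical shift (shrl) since
// the wide local index is unsigned, followed by the usual negation.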

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  Label skip;
  __ jccb(Assembler::below, skip);
  // Pass array to create more detailed exceptions.
  __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
  __ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
  __ bind(skip);
}
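
// Note the unsigned 'below' comparison above: a negative index, viewed
// unsigned, is larger than any possible array length, so the single branch
// rejects both negative and too-large indices (index -1 compares as
// 0xFFFFFFFF, which is never below the length).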

void TemplateTable::iaload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_INT)),
                    noreg, noreg);
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  NOT_LP64(__ mov(rbx, rax));
  // rbx: index
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
                    Address(rdx, rbx, Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_LONG)),
                    noreg, noreg);
}



void TemplateTable::faload() {
  transition(itos, ftos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
                    Address(rdx, rax,
                            Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                    noreg, noreg);
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
                    Address(rdx, rax,
                            Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                    noreg, noreg);
}

void TemplateTable::aaload() {
  transition(itos, atos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  do_oop_load(_masm,
              Address(rdx, rax,
                      UseCompressedOops ? Address::times_4 : Address::times_ptr,
                      arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
              rax,
              IS_ARRAY);
}

void TemplateTable::baload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                    noreg, noreg);
}

void TemplateTable::caload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg, noreg);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
                    noreg, noreg);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ load_float(faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ load_double(daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ store_float(faddress(rbx));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ store_double(daddress(rbx));
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  NOT_LP64(__ pop_l(rax, rdx));
  LP64_ONLY(__ pop_l());
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}

void TemplateTable::wide_fstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_f(xmm0);
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
#else
  wide_istore();
#endif
}

void TemplateTable::wide_dstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_d(xmm0);
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
#else
  wide_lstore();
#endif
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_INT)),
                     rax, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx);  // prefer index in rbx
  // rbx: index
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
                     Address(rcx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
                     noreg /* ltos */, noreg, noreg);
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 1 ? xmm0 : ST(0)
  // rbx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                     noreg /* ftos */, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 2 ? xmm0 : ST(0)
  // rbx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                     noreg /* dtos */, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1()); // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops? Address::times_4 : Address::times_ptr,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check_without_pop(rdx, rcx);     // kills rbx
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  // Move subklass into rbx
  __ load_klass(rbx, rax, tmp_load_klass);
  // Move superklass into rax
  __ load_klass(rax, rdx, tmp_load_klass);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  __ movl(rcx, at_tos_p1()); // index
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, rax, IS_ARRAY);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, rcx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  __ load_klass(rcx, rdx, tmp_load_klass);
  __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ testl(rcx, diffbit);
  Label L_skip;
  __ jccb(Assembler::zero, L_skip);
  __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_1,
                             arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                     rax, noreg, noreg);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_2,
                             arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                     rax, noreg, noreg);
}


void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ store_float(faddress(n));
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ store_double(daddress(n));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
#ifdef _LP64
  switch (op) {
  case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
#else
  __ pop_l(rbx, rcx);
  switch (op) {
    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
                __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default   : ShouldNotReachHere();
  }
#endif
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
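  // corrected_idivl exists because x86 idiv raises a #DE fault for
  // min_jint / -1, while the JVM spec requires the quotient min_jint
  // (remainder 0); the special case is filtered out before dividing.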
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
#ifdef _LP64
  __ pop_l(rdx);
  __ imulq(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
#endif
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
#endif
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
#endif
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);                             // get shift count
#ifdef _LP64
  __ pop_l(rax);                                 // get shift value
  __ shlq(rax);
#else
  __ pop_l(rax, rdx);                            // get shift value
  __ lshl(rdx, rax);
#endif
}

void TemplateTable::lshr() {
#ifdef _LP64
  transition(itos, ltos);
  __ movl(rcx, rax);                             // get shift count
  __ pop_l(rax);                                 // get shift value
  __ sarq(rax);
#else
  transition(itos, ltos);
  __ mov(rcx, rax);                              // get shift count
  __ pop_l(rax, rdx);                            // get shift value
  __ lshr(rdx, rax, true);
#endif
}

void TemplateTable::lushr() {
  transition(itos, ltos);
#ifdef _LP64
  __ movl(rcx, rax);                             // get shift count
  __ pop_l(rax);                                 // get shift value
  __ shrq(rax);
#else
  __ mov(rcx, rax);                              // get shift count
  __ pop_l(rax, rdx);                            // get shift value
  __ lshr(rdx, rax);
#endif
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  if (UseSSE >= 1) {
    switch (op) {
    case add:
      __ addss(xmm0, at_rsp());
      __ addptr(rsp, Interpreter::stackElementSize);
      break;
    case sub:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ subss(xmm0, xmm1);
      break;
    case mul:
      __ mulss(xmm0, at_rsp());
      __ addptr(rsp, Interpreter::stackElementSize);
      break;
    case div:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ divss(xmm0, xmm1);
      break;
    case rem:
      // On x86_64 platforms the SharedRuntime::frem method is called to perform the
      // modulo operation. The frem method calls the function
      // double fmod(double x, double y) in math.h. The documentation of fmod states:
      // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
      // (signalling or quiet) is returned.
      //
      // On x86_32 platforms the FPU is used to perform the modulo operation. The
      // reason is that on 32-bit Windows the sign of modulo operations diverges from
      // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f, and not -0.0f).
      // The fprem instruction used on x86_32 is functionally equivalent to
      // SharedRuntime::frem in that it returns a NaN.
#ifdef _LP64
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
#else
      __ push_f(xmm0);
      __ pop_f();
      __ fld_s(at_rsp());
      __ fremr(rax);
      __ f2ieee();
      __ pop(rax);  // pop second operand off the stack
      __ push_f();
      __ pop_f(xmm0);
#endif
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    switch (op) {
    case add: __ fadd_s (at_rsp());                break;
    case sub: __ fsubr_s(at_rsp());                break;
    case mul: __ fmul_s (at_rsp());                break;
    case div: __ fdivr_s(at_rsp());                break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
    }
    __ f2ieee();
    __ pop(rax);  // pop second operand off the stack
#endif // _LP64
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  if (UseSSE >= 2) {
    switch (op) {
    case add:
      __ addsd(xmm0, at_rsp());
      __ addptr(rsp, 2 * Interpreter::stackElementSize);
      break;
    case sub:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ subsd(xmm0, xmm1);
      break;
    case mul:
      __ mulsd(xmm0, at_rsp());
      __ addptr(rsp, 2 * Interpreter::stackElementSize);
      break;
    case div:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ divsd(xmm0, xmm1);
      break;
    case rem:
      // Similar to fop2(), the modulo operation is performed using the
      // SharedRuntime::drem method (on x86_64 platforms) or using the
      // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
#ifdef _LP64
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
#else
      __ push_d(xmm0);
      __ pop_d();
      __ fld_d(at_rsp());
      __ fremr(rax);
      __ d2ieee();
      __ pop(rax);
      __ pop(rdx);
      __ push_d();
      __ pop_d(xmm0);
#endif
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    switch (op) {
    case add: __ fadd_d (at_rsp());                break;
    case sub: __ fsubr_d(at_rsp());                break;
    case mul: {
      // strict semantics
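      // (The multiplies by the subnormal bias constants bracket the x87
      // operation so that values near the double subnormal range round as
      // they would in true 64-bit arithmetic, despite the x87's wider
      // exponent range -- the long-standing scaling trick for strict
      // double semantics.)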
      __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d (at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
      __ fmulp();
      break;
    }
    case div: {
      // strict semantics
      __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
      __ fmul_d (at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
      __ fmulp();
      break;
    }
    case rem: __ fld_d  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
    }
    __ d2ieee();
    // Pop double precision number from rsp.
    __ pop(rax);
    __ pop(rdx);
#endif
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  LP64_ONLY(__ negq(rax));
  NOT_LP64(__ lneg(rdx, rax));
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
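
// Why fneg/dneg pass &pool[1] below: each pool is four jlongs (32 bytes),
// so wherever the 8-byte-aligned buffer lands, masking &pool[1] down to a
// 16-byte boundary still leaves 16 bytes inside the buffer (e.g. for a
// buffer at ...08, (start + 8) & ~0xF == start + 8; for one at ...00 the
// mask yields the buffer start itself).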

// Buffer for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];
1689 
1690 void TemplateTable::fneg() {
1691   transition(ftos, ftos);
1692   if (UseSSE >= 1) {
1693     static jlong *float_signflip  = double_quadword(&float_signflip_pool[1],  CONST64(0x8000000080000000),  CONST64(0x8000000080000000));
1694     __ xorps(xmm0, ExternalAddress((address) float_signflip));
1695   } else {
1696     LP64_ONLY(ShouldNotReachHere());
1697     NOT_LP64(__ fchs());
1698   }
1699 }
1700 
1701 void TemplateTable::dneg() {
1702   transition(dtos, dtos);
1703   if (UseSSE >= 2) {
1704     static jlong *double_signflip =
1705       double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
1706     __ xorpd(xmm0, ExternalAddress((address) double_signflip));
1707   } else {
1708 #ifdef _LP64
1709     ShouldNotReachHere();
1710 #else
1711     __ fchs();
1712 #endif
1713   }
1714 }
1715 
1716 void TemplateTable::iinc() {
1717   transition(vtos, vtos);
1718   __ load_signed_byte(rdx, at_bcp(2)); // get constant
1719   locals_index(rbx);
1720   __ addl(iaddress(rbx), rdx);
1721 }
1722 
1723 void TemplateTable::wide_iinc() {
1724   transition(vtos, vtos);
1725   __ movl(rdx, at_bcp(4)); // get constant
1726   locals_index_wide(rbx);
1727   __ bswapl(rdx); // swap bytes & sign-extend constant
1728   __ sarl(rdx, 16);
1729   __ addl(iaddress(rbx), rdx);
1730   // Note: should probably use only one movl to get both
1731   //       the index and the constant -> fix this
1732 }
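     // Illustrative decode of the above: the constant is big-endian at bcp+4,
     // e.g. -2 is stored as the bytes FF FE; movl reads them little-endian
     // into the low half (rdx == 0x????FEFF), bswapl yields 0xFFFE????, and
     // sarl(16) sign-extends, leaving 0xFFFFFFFE == -2.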
1733 
1734 void TemplateTable::convert() {
1735 #ifdef _LP64
1736   // Checking
1737 #ifdef ASSERT
1738   {
1739     TosState tos_in  = ilgl;
1740     TosState tos_out = ilgl;
1741     switch (bytecode()) {
1742     case Bytecodes::_i2l: // fall through
1743     case Bytecodes::_i2f: // fall through
1744     case Bytecodes::_i2d: // fall through
1745     case Bytecodes::_i2b: // fall through
1746     case Bytecodes::_i2c: // fall through
1747     case Bytecodes::_i2s: tos_in = itos; break;
1748     case Bytecodes::_l2i: // fall through
1749     case Bytecodes::_l2f: // fall through
1750     case Bytecodes::_l2d: tos_in = ltos; break;
1751     case Bytecodes::_f2i: // fall through
1752     case Bytecodes::_f2l: // fall through
1753     case Bytecodes::_f2d: tos_in = ftos; break;
1754     case Bytecodes::_d2i: // fall through
1755     case Bytecodes::_d2l: // fall through
1756     case Bytecodes::_d2f: tos_in = dtos; break;
1757     default             : ShouldNotReachHere();
1758     }
1759     switch (bytecode()) {
1760     case Bytecodes::_l2i: // fall through
1761     case Bytecodes::_f2i: // fall through
1762     case Bytecodes::_d2i: // fall through
1763     case Bytecodes::_i2b: // fall through
1764     case Bytecodes::_i2c: // fall through
1765     case Bytecodes::_i2s: tos_out = itos; break;
1766     case Bytecodes::_i2l: // fall through
1767     case Bytecodes::_f2l: // fall through
1768     case Bytecodes::_d2l: tos_out = ltos; break;
1769     case Bytecodes::_i2f: // fall through
1770     case Bytecodes::_l2f: // fall through
1771     case Bytecodes::_d2f: tos_out = ftos; break;
1772     case Bytecodes::_i2d: // fall through
1773     case Bytecodes::_l2d: // fall through
1774     case Bytecodes::_f2d: tos_out = dtos; break;
1775     default             : ShouldNotReachHere();
1776     }
1777     transition(tos_in, tos_out);
1778   }
1779 #endif // ASSERT
1780 
1781   static const int64_t is_nan = 0x8000000000000000L;
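       // Note: this is the x86 "integer indefinite" value for 64-bit results.
       // The truncating conversions below produce it (0x80000000 for 32-bit
       // results) on NaN and out-of-range inputs, so matching it routes
       // exactly those cases to the SharedRuntime stubs.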
1782 
1783   // Conversion
1784   switch (bytecode()) {
1785   case Bytecodes::_i2l:
1786     __ movslq(rax, rax);
1787     break;
1788   case Bytecodes::_i2f:
1789     __ cvtsi2ssl(xmm0, rax);
1790     break;
1791   case Bytecodes::_i2d:
1792     __ cvtsi2sdl(xmm0, rax);
1793     break;
1794   case Bytecodes::_i2b:
1795     __ movsbl(rax, rax);
1796     break;
1797   case Bytecodes::_i2c:
1798     __ movzwl(rax, rax);
1799     break;
1800   case Bytecodes::_i2s:
1801     __ movswl(rax, rax);
1802     break;
1803   case Bytecodes::_l2i:
1804     __ movl(rax, rax);
1805     break;
1806   case Bytecodes::_l2f:
1807     __ cvtsi2ssq(xmm0, rax);
1808     break;
1809   case Bytecodes::_l2d:
1810     __ cvtsi2sdq(xmm0, rax);
1811     break;
1812   case Bytecodes::_f2i:
1813   {
1814     Label L;
1815     __ cvttss2sil(rax, xmm0);
1816     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1817     __ jcc(Assembler::notEqual, L);
1818     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1819     __ bind(L);
1820   }
1821     break;
1822   case Bytecodes::_f2l:
1823   {
1824     Label L;
1825     __ cvttss2siq(rax, xmm0);
1826     // NaN or overflow/underflow?
1827     __ cmp64(rax, ExternalAddress((address) &is_nan));
1828     __ jcc(Assembler::notEqual, L);
1829     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1830     __ bind(L);
1831   }
1832     break;
1833   case Bytecodes::_f2d:
1834     __ cvtss2sd(xmm0, xmm0);
1835     break;
1836   case Bytecodes::_d2i:
1837   {
1838     Label L;
1839     __ cvttsd2sil(rax, xmm0);
1840     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1841     __ jcc(Assembler::notEqual, L);
1842     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1843     __ bind(L);
1844   }
1845     break;
1846   case Bytecodes::_d2l:
1847   {
1848     Label L;
1849     __ cvttsd2siq(rax, xmm0);
1850     // NaN or overflow/underflow?
1851     __ cmp64(rax, ExternalAddress((address) &is_nan));
1852     __ jcc(Assembler::notEqual, L);
1853     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1854     __ bind(L);
1855   }
1856     break;
1857   case Bytecodes::_d2f:
1858     __ cvtsd2ss(xmm0, xmm0);
1859     break;
1860   default:
1861     ShouldNotReachHere();
1862   }
1863 #else
1864   // Checking
1865 #ifdef ASSERT
1866   { TosState tos_in  = ilgl;
1867     TosState tos_out = ilgl;
1868     switch (bytecode()) {
1869       case Bytecodes::_i2l: // fall through
1870       case Bytecodes::_i2f: // fall through
1871       case Bytecodes::_i2d: // fall through
1872       case Bytecodes::_i2b: // fall through
1873       case Bytecodes::_i2c: // fall through
1874       case Bytecodes::_i2s: tos_in = itos; break;
1875       case Bytecodes::_l2i: // fall through
1876       case Bytecodes::_l2f: // fall through
1877       case Bytecodes::_l2d: tos_in = ltos; break;
1878       case Bytecodes::_f2i: // fall through
1879       case Bytecodes::_f2l: // fall through
1880       case Bytecodes::_f2d: tos_in = ftos; break;
1881       case Bytecodes::_d2i: // fall through
1882       case Bytecodes::_d2l: // fall through
1883       case Bytecodes::_d2f: tos_in = dtos; break;
1884       default             : ShouldNotReachHere();
1885     }
1886     switch (bytecode()) {
1887       case Bytecodes::_l2i: // fall through
1888       case Bytecodes::_f2i: // fall through
1889       case Bytecodes::_d2i: // fall through
1890       case Bytecodes::_i2b: // fall through
1891       case Bytecodes::_i2c: // fall through
1892       case Bytecodes::_i2s: tos_out = itos; break;
1893       case Bytecodes::_i2l: // fall through
1894       case Bytecodes::_f2l: // fall through
1895       case Bytecodes::_d2l: tos_out = ltos; break;
1896       case Bytecodes::_i2f: // fall through
1897       case Bytecodes::_l2f: // fall through
1898       case Bytecodes::_d2f: tos_out = ftos; break;
1899       case Bytecodes::_i2d: // fall through
1900       case Bytecodes::_l2d: // fall through
1901       case Bytecodes::_f2d: tos_out = dtos; break;
1902       default             : ShouldNotReachHere();
1903     }
1904     transition(tos_in, tos_out);
1905   }
1906 #endif // ASSERT
1907 
1908   // Conversion
1909   // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1910   switch (bytecode()) {
1911     case Bytecodes::_i2l:
1912       __ extend_sign(rdx, rax);
1913       break;
1914     case Bytecodes::_i2f:
1915       if (UseSSE >= 1) {
1916         __ cvtsi2ssl(xmm0, rax);
1917       } else {
1918         __ push(rax);          // store int on tos
1919         __ fild_s(at_rsp());   // load int to ST0
1920         __ f2ieee();           // truncate to float size
1921         __ pop(rcx);           // adjust rsp
1922       }
1923       break;
1924     case Bytecodes::_i2d:
1925       if (UseSSE >= 2) {
1926         __ cvtsi2sdl(xmm0, rax);
1927       } else {
1928         __ push(rax);          // add one slot for d2ieee()
1929         __ push(rax);          // store int on tos
1930         __ fild_s(at_rsp());   // load int to ST0
1931         __ d2ieee();           // truncate to double size
1932         __ pop(rcx);           // adjust rsp
1933         __ pop(rcx);
1934       }
1935       break;
1936     case Bytecodes::_i2b:
1937       __ shll(rax, 24);      // truncate upper 24 bits
1938       __ sarl(rax, 24);      // and sign-extend byte
1939       LP64_ONLY(__ movsbl(rax, rax));
1940       break;
1941     case Bytecodes::_i2c:
1942       __ andl(rax, 0xFFFF);  // truncate upper 16 bits
1943       LP64_ONLY(__ movzwl(rax, rax));
1944       break;
1945     case Bytecodes::_i2s:
1946       __ shll(rax, 16);      // truncate upper 16 bits
1947       __ sarl(rax, 16);      // and sign-extend short
1948       LP64_ONLY(__ movswl(rax, rax));
1949       break;
1950     case Bytecodes::_l2i:
1951       /* nothing to do */
1952       break;
1953     case Bytecodes::_l2f:
1954       // On 64-bit platforms the cvtsi2ssq instruction is used to convert
1955       // 64-bit long values to floats. On 32-bit platforms that instruction
1956       // cannot be used with 64-bit operands, so the FPU is used to perform
1957       // the conversion.
1958       __ push(rdx);          // store long on tos
1959       __ push(rax);
1960       __ fild_d(at_rsp());   // load long to ST0
1961       __ f2ieee();           // truncate to float size
1962       __ pop(rcx);           // adjust rsp
1963       __ pop(rcx);
1964       if (UseSSE >= 1) {
1965         __ push_f();
1966         __ pop_f(xmm0);
1967       }
1968       break;
1969     case Bytecodes::_l2d:
1970       // On 32-bit platforms the FPU is used for the conversion because
1971       // it is not possible to use the cvtsi2sdq instruction with 64-bit
1972       // operands there.
1973       __ push(rdx);          // store long on tos
1974       __ push(rax);
1975       __ fild_d(at_rsp());   // load long to ST0
1976       __ d2ieee();           // truncate to double size
1977       __ pop(rcx);           // adjust rsp
1978       __ pop(rcx);
1979       if (UseSSE >= 2) {
1980         __ push_d();
1981         __ pop_d(xmm0);
1982       }
1983       break;
1984     case Bytecodes::_f2i:
1985       // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
1986       // as it returns 0 for any NaN.
1987       if (UseSSE >= 1) {
1988         __ push_f(xmm0);
1989       } else {
1990         __ push(rcx);          // reserve space for argument
1991         __ fstp_s(at_rsp());   // pass float argument on stack
1992       }
1993       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1994       break;
1995     case Bytecodes::_f2l:
1996       // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
1997       // as it returns 0 for any NaN.
1998       if (UseSSE >= 1) {
1999         __ push_f(xmm0);
2000       } else {
2001         __ push(rcx);          // reserve space for argument
2002         __ fstp_s(at_rsp());   // pass float argument on stack
2003       }
2004       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2005       break;
2006     case Bytecodes::_f2d:
2007       if (UseSSE < 1) {
2008         /* nothing to do */
2009       } else if (UseSSE == 1) {
2010         __ push_f(xmm0);
2011         __ pop_f();
2012       } else { // UseSSE >= 2
2013         __ cvtss2sd(xmm0, xmm0);
2014       }
2015       break;
2016     case Bytecodes::_d2i:
2017       if (UseSSE >= 2) {
2018         __ push_d(xmm0);
2019       } else {
2020         __ push(rcx);          // reserve space for argument
2021         __ push(rcx);
2022         __ fstp_d(at_rsp());   // pass double argument on stack
2023       }
2024       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
2025       break;
2026     case Bytecodes::_d2l:
2027       if (UseSSE >= 2) {
2028         __ push_d(xmm0);
2029       } else {
2030         __ push(rcx);          // reserve space for argument
2031         __ push(rcx);
2032         __ fstp_d(at_rsp());   // pass double argument on stack
2033       }
2034       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
2035       break;
2036     case Bytecodes::_d2f:
2037       if (UseSSE <= 1) {
2038         __ push(rcx);          // reserve space for f2ieee()
2039         __ f2ieee();           // truncate to float size
2040         __ pop(rcx);           // adjust rsp
2041         if (UseSSE == 1) {
2042           // The cvtsd2ss instruction is not available if UseSSE==1, therefore
2043           // the conversion is performed using the FPU in this case.
2044           __ push_f();
2045           __ pop_f(xmm0);
2046         }
2047       } else { // UseSSE >= 2
2048         __ cvtsd2ss(xmm0, xmm0);
2049       }
2050       break;
2051     default             :
2052       ShouldNotReachHere();
2053   }
2054 #endif
2055 }
2056 
2057 void TemplateTable::lcmp() {
2058   transition(ltos, itos);
2059 #ifdef _LP64
2060   Label done;
2061   __ pop_l(rdx);
2062   __ cmpq(rdx, rax);
2063   __ movl(rax, -1);
2064   __ jccb(Assembler::less, done);
2065   __ setb(Assembler::notEqual, rax);
2066   __ movzbl(rax, rax);
2067   __ bind(done);
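       // Illustrative: after the cmpq above (x in rdx, y in rax),
       //   x <  y : rax keeps the -1 and the jccb exits;
       //   x == y : setb(notEqual) writes 0, movzbl leaves rax == 0;
       //   x >  y : setb(notEqual) writes 1, movzbl leaves rax == 1.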
2068 #else
2069 
2070   // y = rdx:rax
2071   __ pop_l(rbx, rcx);             // get x = rcx:rbx
2072   __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
2073   __ mov(rax, rcx);
2074 #endif
2075 }
2076 
2077 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2078   if ((is_float && UseSSE >= 1) ||
2079       (!is_float && UseSSE >= 2)) {
2080     Label done;
2081     if (is_float) {
2082       // XXX get rid of pop here, use ... reg, mem32
2083       __ pop_f(xmm1);
2084       __ ucomiss(xmm1, xmm0);
2085     } else {
2086       // XXX get rid of pop here, use ... reg, mem64
2087       __ pop_d(xmm1);
2088       __ ucomisd(xmm1, xmm0);
2089     }
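         // ucomiss/ucomisd flag mapping (illustrative):
         //   unordered (NaN) : ZF = PF = CF = 1
         //   xmm1 <  xmm0    : CF = 1
         //   xmm1 == xmm0    : ZF = 1
         //   xmm1 >  xmm0    : ZF = PF = CF = 0
         // unordered_result selects whether a NaN compares as -1 or +1 below.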
2090     if (unordered_result < 0) {
2091       __ movl(rax, -1);
2092       __ jccb(Assembler::parity, done);
2093       __ jccb(Assembler::below, done);
2094       __ setb(Assembler::notEqual, rdx);
2095       __ movzbl(rax, rdx);
2096     } else {
2097       __ movl(rax, 1);
2098       __ jccb(Assembler::parity, done);
2099       __ jccb(Assembler::above, done);
2100       __ movl(rax, 0);
2101       __ jccb(Assembler::equal, done);
2102       __ decrementl(rax);
2103     }
2104     __ bind(done);
2105   } else {
2106 #ifdef _LP64
2107     ShouldNotReachHere();
2108 #else
2109     if (is_float) {
2110       __ fld_s(at_rsp());
2111     } else {
2112       __ fld_d(at_rsp());
2113       __ pop(rdx);
2114     }
2115     __ pop(rcx);
2116     __ fcmp2int(rax, unordered_result < 0);
2117 #endif // _LP64
2118   }
2119 }
2120 
2121 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2122   __ get_method(rcx); // rcx holds method
2123   __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2124                                      // holds bumped taken count
2125 
2126   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2127                              InvocationCounter::counter_offset();
2128   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2129                               InvocationCounter::counter_offset();
2130 
2131   // Load up edx with the branch displacement
2132   if (is_wide) {
2133     __ movl(rdx, at_bcp(1));
2134   } else {
2135     __ load_signed_short(rdx, at_bcp(1));
2136   }
2137   __ bswapl(rdx);
2138 
2139   if (!is_wide) {
2140     __ sarl(rdx, 16);
2141   }
2142   LP64_ONLY(__ movl2ptr(rdx, rdx));
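       // Illustrative: the displacement is stored big-endian in the bytecode
       // stream, so the little-endian load plus bswapl (and, for the 16-bit
       // non-wide case, sarl(16) to sign-extend) recovers it; e.g. offset
       // bytes FF FE decode to -2.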
2143 
2144   // Handle all the JSR stuff here, then exit.
2145   // It's much shorter and cleaner than intermingling with the non-JSR
2146   // normal-branch stuff occurring below.
2147   if (is_jsr) {
2148     // Pre-load the next target bytecode into rbx
2149     __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2150 
2151     // compute return address as bci in rax
2152     __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2153                         in_bytes(ConstMethod::codes_offset())));
2154     __ subptr(rax, Address(rcx, Method::const_offset()));
2155     // Adjust the bcp in r13 by the displacement in rdx
2156     __ addptr(rbcp, rdx);
2157     // jsr returns atos that is not an oop
2158     __ push_i(rax);
2159     __ dispatch_only(vtos, true);
2160     return;
2161   }
2162 
2163   // Normal (non-jsr) branch handling
2164 
2165   // Adjust the bcp in r13 by the displacement in rdx
2166   __ addptr(rbcp, rdx);
2167 
2168   assert(UseLoopCounter || !UseOnStackReplacement,
2169          "on-stack-replacement requires loop counters");
2170   Label backedge_counter_overflow;
2171   Label dispatch;
2172   if (UseLoopCounter) {
2173     // increment backedge counter for backward branches
2174     // rax: MDO
2175     // rbx: MDO bumped taken-count
2176     // rcx: method
2177     // rdx: target offset
2178     // r13: target bcp
2179     // r14: locals pointer
2180     __ testl(rdx, rdx);             // check if forward or backward branch
2181     __ jcc(Assembler::positive, dispatch); // count only if backward branch
2182 
2183     // check if MethodCounters exists
2184     Label has_counters;
2185     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2186     __ testptr(rax, rax);
2187     __ jcc(Assembler::notZero, has_counters);
2188     __ push(rdx);
2189     __ push(rcx);
2190     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2191                rcx);
2192     __ pop(rcx);
2193     __ pop(rdx);
2194     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2195     __ testptr(rax, rax);
2196     __ jcc(Assembler::zero, dispatch);
2197     __ bind(has_counters);
2198 
2199     Label no_mdo;
2200     int increment = InvocationCounter::count_increment;
2201     if (ProfileInterpreter) {
2202       // Are we profiling?
2203       __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2204       __ testptr(rbx, rbx);
2205       __ jccb(Assembler::zero, no_mdo);
2206       // Increment the MDO backedge counter
2207       const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2208           in_bytes(InvocationCounter::counter_offset()));
2209       const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2210       __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
2211           UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2212       __ jmp(dispatch);
2213     }
2214     __ bind(no_mdo);
2215     // Increment backedge counter in MethodCounters*
2216     __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2217     const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2218     __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
2219         rax, false, Assembler::zero, UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2220     __ bind(dispatch);
2221   }
2222 
2223   // Pre-load the next target bytecode into rbx
2224   __ load_unsigned_byte(rbx, Address(rbcp, 0));
2225 
2226   // continue with the bytecode @ target
2227   // rax: return bci for jsr's, unused otherwise
2228   // rbx: target bytecode
2229   // r13: target bcp
2230   __ dispatch_only(vtos, true);
2231 
2232   if (UseLoopCounter) {
2233     if (UseOnStackReplacement) {
2234       Label set_mdp;
2235       // invocation counter overflow
2236       __ bind(backedge_counter_overflow);
2237       __ negptr(rdx);
2238       __ addptr(rdx, rbcp); // branch bcp
2239       // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2240       __ call_VM(noreg,
2241                  CAST_FROM_FN_PTR(address,
2242                                   InterpreterRuntime::frequency_counter_overflow),
2243                  rdx);
2244 
2245       // rax: osr nmethod (osr ok) or NULL (osr not possible)
2246       // rdx: scratch
2247       // r14: locals pointer
2248       // r13: bcp
2249       __ testptr(rax, rax);                        // test result
2250       __ jcc(Assembler::zero, dispatch);         // no osr if null
2251       // nmethod may have been invalidated (VM may block upon call_VM return)
2252       __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2253       __ jcc(Assembler::notEqual, dispatch);
2254 
2255       // We have the address of an on stack replacement routine in rax.
2256       // In preparation of invoking it, first we must migrate the locals
2257       // and monitors from off the interpreter frame on the stack.
2258       // Ensure to save the osr nmethod over the migration call,
2259       // it will be preserved in rbx.
2260       __ mov(rbx, rax);
2261 
2262       NOT_LP64(__ get_thread(rcx));
2263 
2264       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2265 
2266       // rax is OSR buffer, move it to expected parameter location
2267       LP64_ONLY(__ mov(j_rarg0, rax));
2268       NOT_LP64(__ mov(rcx, rax));
2269       // We use the j_rarg definitions here so that registers don't conflict:
2270       // parameter registers differ across platforms, and in the midst of the
2271       // calling sequence to the OSR nmethod we don't want a collision. These are NOT parameters.
2272 
2273       const Register retaddr   = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2274       const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2275 
2276       // pop the interpreter frame
2277       __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2278       __ leave();                                // remove frame anchor
2279       __ pop(retaddr);                           // get return address
2280       __ mov(rsp, sender_sp);                   // set sp to sender sp
2281       // Ensure compiled code always sees stack at proper alignment
2282       __ andptr(rsp, -(StackAlignmentInBytes));
2283 
2284       // No specialized return from compiled code to the interpreter or
2285       // the call stub is needed here.
2286 
2287       // push the return address
2288       __ push(retaddr);
2289 
2290       // and begin the OSR nmethod
2291       __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
2292     }
2293   }
2294 }
2295 
2296 void TemplateTable::if_0cmp(Condition cc) {
2297   transition(itos, vtos);
2298   // assume branch is more often taken than not (loops use backward branches)
2299   Label not_taken;
2300   __ testl(rax, rax);
2301   __ jcc(j_not(cc), not_taken);
2302   branch(false, false);
2303   __ bind(not_taken);
2304   __ profile_not_taken_branch(rax);
2305 }
2306 
2307 void TemplateTable::if_icmp(Condition cc) {
2308   transition(itos, vtos);
2309   // assume branch is more often taken than not (loops use backward branches)
2310   Label not_taken;
2311   __ pop_i(rdx);
2312   __ cmpl(rdx, rax);
2313   __ jcc(j_not(cc), not_taken);
2314   branch(false, false);
2315   __ bind(not_taken);
2316   __ profile_not_taken_branch(rax);
2317 }
2318 
2319 void TemplateTable::if_nullcmp(Condition cc) {
2320   transition(atos, vtos);
2321   // assume branch is more often taken than not (loops use backward branches)
2322   Label not_taken;
2323   __ testptr(rax, rax);
2324   __ jcc(j_not(cc), not_taken);
2325   branch(false, false);
2326   __ bind(not_taken);
2327   __ profile_not_taken_branch(rax);
2328 }
2329 
2330 void TemplateTable::if_acmp(Condition cc) {
2331   transition(atos, vtos);
2332   // assume branch is more often taken than not (loops use backward branches)
2333   Label not_taken;
2334   __ pop_ptr(rdx);
2335   __ cmpoop(rdx, rax);
2336   __ jcc(j_not(cc), not_taken);
2337   branch(false, false);
2338   __ bind(not_taken);
2339   __ profile_not_taken_branch(rax);
2340 }
2341 
2342 void TemplateTable::ret() {
2343   transition(vtos, vtos);
2344   locals_index(rbx);
2345   LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2346   NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2347   __ profile_ret(rbx, rcx);
2348   __ get_method(rax);
2349   __ movptr(rbcp, Address(rax, Method::const_offset()));
2350   __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2351                       ConstMethod::codes_offset()));
2352   __ dispatch_next(vtos, 0, true);
2353 }
2354 
2355 void TemplateTable::wide_ret() {
2356   transition(vtos, vtos);
2357   locals_index_wide(rbx);
2358   __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2359   __ profile_ret(rbx, rcx);
2360   __ get_method(rax);
2361   __ movptr(rbcp, Address(rax, Method::const_offset()));
2362   __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2363   __ dispatch_next(vtos, 0, true);
2364 }
2365 
2366 void TemplateTable::tableswitch() {
2367   Label default_case, continue_execution;
2368   transition(itos, vtos);
2369 
2370   // align r13/rsi
2371   __ lea(rbx, at_bcp(BytesPerInt));
2372   __ andptr(rbx, -BytesPerInt);
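       // Layout reminder (illustrative): after the alignment padding the
       // stream holds default, lo, hi (big-endian s4) and then hi - lo + 1
       // jump offsets; rbx now points at the default entry, hence the
       // 3 * BytesPerInt base of the table access below.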
2373   // load lo & hi
2374   __ movl(rcx, Address(rbx, BytesPerInt));
2375   __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2376   __ bswapl(rcx);
2377   __ bswapl(rdx);
2378   // check against lo & hi
2379   __ cmpl(rax, rcx);
2380   __ jcc(Assembler::less, default_case);
2381   __ cmpl(rax, rdx);
2382   __ jcc(Assembler::greater, default_case);
2383   // lookup dispatch offset
2384   __ subl(rax, rcx);
2385   __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2386   __ profile_switch_case(rax, rbx, rcx);
2387   // continue execution
2388   __ bind(continue_execution);
2389   __ bswapl(rdx);
2390   LP64_ONLY(__ movl2ptr(rdx, rdx));
2391   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2392   __ addptr(rbcp, rdx);
2393   __ dispatch_only(vtos, true);
2394   // handle default
2395   __ bind(default_case);
2396   __ profile_switch_default(rax);
2397   __ movl(rdx, Address(rbx, 0));
2398   __ jmp(continue_execution);
2399 }
2400 
2401 void TemplateTable::lookupswitch() {
2402   transition(itos, itos);
2403   __ stop("lookupswitch bytecode should have been rewritten");
2404 }
2405 
2406 void TemplateTable::fast_linearswitch() {
2407   transition(itos, vtos);
2408   Label loop_entry, loop, found, continue_execution;
2409   // bswap rax so we can avoid bswapping the table entries
2410   __ bswapl(rax);
2411   // align r13
2412   __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2413                                     // this instruction (change offsets
2414                                     // below)
2415   __ andptr(rbx, -BytesPerInt);
2416   // set counter
2417   __ movl(rcx, Address(rbx, BytesPerInt));
2418   __ bswapl(rcx);
2419   __ jmpb(loop_entry);
2420   // table search
2421   __ bind(loop);
2422   __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2423   __ jcc(Assembler::equal, found);
2424   __ bind(loop_entry);
2425   __ decrementl(rcx);
2426   __ jcc(Assembler::greaterEqual, loop);
2427   // default case
2428   __ profile_switch_default(rax);
2429   __ movl(rdx, Address(rbx, 0));
2430   __ jmp(continue_execution);
2431   // entry found -> get offset
2432   __ bind(found);
2433   __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2434   __ profile_switch_case(rcx, rax, rbx);
2435   // continue execution
2436   __ bind(continue_execution);
2437   __ bswapl(rdx);
2438   __ movl2ptr(rdx, rdx);
2439   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2440   __ addptr(rbcp, rdx);
2441   __ dispatch_only(vtos, true);
2442 }
2443 
2444 void TemplateTable::fast_binaryswitch() {
2445   transition(itos, vtos);
2446   // Implementation using the following core algorithm:
2447   //
2448   // int binary_search(int key, LookupswitchPair* array, int n) {
2449   //   // Binary search according to "Methodik des Programmierens" by
2450   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2451   //   int i = 0;
2452   //   int j = n;
2453   //   while (i+1 < j) {
2454   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2455   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2456   //     // where a stands for the array and assuming that the (nonexistent)
2457   //     // element a[n] is infinitely big.
2458   //     int h = (i + j) >> 1;
2459   //     // i < h < j
2460   //     if (key < array[h].fast_match()) {
2461   //       j = h;
2462   //     } else {
2463   //       i = h;
2464   //     }
2465   //   }
2466   //   // R: a[i] <= key < a[i+1] or Q
2467   //   // (i.e., if key is within array, i is the correct index)
2468   //   return i;
2469   // }
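       // Each LookupswitchPair is two big-endian s4 values (match, offset),
       // i.e. 8 bytes, which is why the loads below scale by Address::times_8.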
2470 
2471   // Register allocation
2472   const Register key   = rax; // already set (tosca)
2473   const Register array = rbx;
2474   const Register i     = rcx;
2475   const Register j     = rdx;
2476   const Register h     = rdi;
2477   const Register temp  = rsi;
2478 
2479   // Find array start
2480   NOT_LP64(__ save_bcp());
2481 
2482   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2483                                           // get rid of this
2484                                           // instruction (change
2485                                           // offsets below)
2486   __ andptr(array, -BytesPerInt);
2487 
2488   // Initialize i & j
2489   __ xorl(i, i);                            // i = 0;
2490   __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2491 
2492   // Convert j into native byteordering
2493   __ bswapl(j);
2494 
2495   // And start
2496   Label entry;
2497   __ jmp(entry);
2498 
2499   // binary search loop
2500   {
2501     Label loop;
2502     __ bind(loop);
2503     // int h = (i + j) >> 1;
2504     __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2505     __ sarl(h, 1);                               // h = (i + j) >> 1;
2506     // if (key < array[h].fast_match()) {
2507     //   j = h;
2508     // } else {
2509     //   i = h;
2510     // }
2511     // Convert array[h].match to native byte-ordering before compare
2512     __ movl(temp, Address(array, h, Address::times_8));
2513     __ bswapl(temp);
2514     __ cmpl(key, temp);
2515     // j = h if (key <  array[h].fast_match())
2516     __ cmov32(Assembler::less, j, h);
2517     // i = h if (key >= array[h].fast_match())
2518     __ cmov32(Assembler::greaterEqual, i, h);
2519     // while (i+1 < j)
2520     __ bind(entry);
2521     __ leal(h, Address(i, 1)); // i+1
2522     __ cmpl(h, j);             // i+1 < j
2523     __ jcc(Assembler::less, loop);
2524   }
2525 
2526   // end of binary search, result index is i (must check again!)
2527   Label default_case;
2528   // Convert array[i].match to native byte-ordering before compare
2529   __ movl(temp, Address(array, i, Address::times_8));
2530   __ bswapl(temp);
2531   __ cmpl(key, temp);
2532   __ jcc(Assembler::notEqual, default_case);
2533 
2534   // entry found -> j = offset
2535   __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2536   __ profile_switch_case(i, key, array);
2537   __ bswapl(j);
2538   LP64_ONLY(__ movslq(j, j));
2539 
2540   NOT_LP64(__ restore_bcp());
2541   NOT_LP64(__ restore_locals());                           // restore rdi
2542 
2543   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2544   __ addptr(rbcp, j);
2545   __ dispatch_only(vtos, true);
2546 
2547   // default case -> j = default offset
2548   __ bind(default_case);
2549   __ profile_switch_default(i);
2550   __ movl(j, Address(array, -2 * BytesPerInt));
2551   __ bswapl(j);
2552   LP64_ONLY(__ movslq(j, j));
2553 
2554   NOT_LP64(__ restore_bcp());
2555   NOT_LP64(__ restore_locals());
2556 
2557   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2558   __ addptr(rbcp, j);
2559   __ dispatch_only(vtos, true);
2560 }
2561 
2562 void TemplateTable::_return(TosState state) {
2563   transition(state, state);
2564 
2565   assert(_desc->calls_vm(),
2566          "inconsistent calls_vm information"); // call in remove_activation
2567 
2568   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2569     assert(state == vtos, "only valid state");
2570     Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2571     __ movptr(robj, aaddress(0));
2572     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
2573     __ load_klass(rdi, robj, tmp_load_klass);
2574     __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2575     __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2576     Label skip_register_finalizer;
2577     __ jcc(Assembler::zero, skip_register_finalizer);
2578 
2579     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2580 
2581     __ bind(skip_register_finalizer);
2582   }
2583 
2584   if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
2585     Label no_safepoint;
2586     NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
2587 #ifdef _LP64
2588     __ testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2589 #else
2590     const Register thread = rdi;
2591     __ get_thread(thread);
2592     __ testb(Address(thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2593 #endif
2594     __ jcc(Assembler::zero, no_safepoint);
2595     __ push(state);
2596     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2597                                        InterpreterRuntime::at_safepoint));
2598     __ pop(state);
2599     __ bind(no_safepoint);
2600   }
2601 
2602   // Narrow result if state is itos but result type is smaller.
2603   // Need to narrow in the return bytecode rather than in generate_return_entry
2604   // since compiled code callers expect the result to already be narrowed.
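       // e.g. a method declared to return boolean must present a canonical 0/1
       // in rax; narrow() re-truncates rax according to the method's result type.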
2605   if (state == itos) {
2606     __ narrow(rax);
2607   }
2608   __ remove_activation(state, rbcp);
2609 
2610   __ jmp(rbcp);
2611 }
2612 
2613 // ----------------------------------------------------------------------------
2614 // Volatile variables demand their effects be made known to all CPUs
2615 // in order.  Store buffers on most chips allow reads & writes to
2616 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2617 // without some kind of memory barrier (i.e., it's not sufficient that
2618 // the interpreter does not reorder volatile references, the hardware
2619 // also must not reorder them).
2620 //
2621 // According to the new Java Memory Model (JMM):
2622 // (1) All volatiles are serialized with respect to each other.  ALSO reads &
2623 //     writes act as acquire & release, so:
2624 // (2) A read cannot let unrelated NON-volatile memory refs that
2625 //     happen after the read float up to before the read.  It's OK for
2626 //     non-volatile memory refs that happen before the volatile read to
2627 //     float down below it.
2628 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2629 //     memory refs that happen BEFORE the write float down to after the
2630 //     write.  It's OK for non-volatile memory refs that happen after the
2631 //     volatile write to float up before it.
2632 //
2633 // We only put in barriers around volatile refs (they are expensive),
2634 // not _between_ memory refs (that would require us to track the
2635 // flavor of the previous memory refs).  Requirements (2) and (3)
2636 // require some barriers before volatile stores and after volatile
2637 // loads.  These nearly cover requirement (1) but miss the
2638 // volatile-store-volatile-load case.  This final case is placed after
2639 // volatile-stores although it could just as well go before
2640 // volatile-loads.
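     // Roughly, on x86 (TSO) this reduces to the following (illustrative):
     //   volatile store: <store>; membar(StoreLoad | StoreStore)
     //   volatile load:  <load>   -- no explicit barrier is needed
     // as can be seen in putfield_or_static() and getfield_or_static() below.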
2641 
2642 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2643   // Helper function to insert an is-volatile test and memory barrier
2644   __ membar(order_constraint);
2645 }
2646 
2647 void TemplateTable::resolve_cache_and_index(int byte_no,
2648                                             Register cache,
2649                                             Register index,
2650                                             size_t index_size) {
2651   const Register temp = rbx;
2652   assert_different_registers(cache, index, temp);
2653 
2654   Label L_clinit_barrier_slow;
2655   Label resolved;
2656 
2657   Bytecodes::Code code = bytecode();
2658   switch (code) {
2659   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2660   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2661   default: break;
2662   }
2663 
2664   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2665   __ get_cache_and_index_and_bytecode_at_bcp(cache, index, temp, byte_no, 1, index_size);
2666   __ cmpl(temp, code);  // have we resolved this bytecode?
2667   __ jcc(Assembler::equal, resolved);
2668 
2669   // resolve first time through
2670   // Class initialization barrier slow path lands here as well.
2671   __ bind(L_clinit_barrier_slow);
2672   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2673   __ movl(temp, code);
2674   __ call_VM(noreg, entry, temp);
2675   // Update registers with resolved info
2676   __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
2677 
2678   __ bind(resolved);
2679 
2680   // Class initialization barrier for static methods
2681   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2682     const Register method = temp;
2683     const Register klass  = temp;
2684     const Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2685     assert(thread != noreg, "x86_32 not supported");
2686 
2687     __ load_resolved_method_at_index(byte_no, method, cache, index);
2688     __ load_method_holder(klass, method);
2689     __ clinit_barrier(klass, thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow);
2690   }
2691 }
2692 
2693 // The cache and index registers must be set before the call
2694 void TemplateTable::load_field_cp_cache_entry(Register obj,
2695                                               Register cache,
2696                                               Register index,
2697                                               Register off,
2698                                               Register flags,
2699                                               bool is_static = false) {
2700   assert_different_registers(cache, index, flags, off);
2701 
2702   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2703   // Field offset
2704   __ movptr(off, Address(cache, index, Address::times_ptr,
2705                          in_bytes(cp_base_offset +
2706                                   ConstantPoolCacheEntry::f2_offset())));
2707   // Flags
2708   __ movl(flags, Address(cache, index, Address::times_ptr,
2709                          in_bytes(cp_base_offset +
2710                                   ConstantPoolCacheEntry::flags_offset())));
2711 
2712   // klass overwrite register
2713   if (is_static) {
2714     __ movptr(obj, Address(cache, index, Address::times_ptr,
2715                            in_bytes(cp_base_offset +
2716                                     ConstantPoolCacheEntry::f1_offset())));
2717     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2718     __ movptr(obj, Address(obj, mirror_offset));
2719     __ resolve_oop_handle(obj);
2720   }
2721 }
2722 
2723 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2724                                                Register method,
2725                                                Register itable_index,
2726                                                Register flags,
2727                                                bool is_invokevirtual,
2728                                                bool is_invokevfinal, /*unused*/
2729                                                bool is_invokedynamic) {
2730   // setup registers
2731   const Register cache = rcx;
2732   const Register index = rdx;
2733   assert_different_registers(method, flags);
2734   assert_different_registers(method, cache, index);
2735   assert_different_registers(itable_index, flags);
2736   assert_different_registers(itable_index, cache, index);
2737   // determine constant pool cache field offsets
2738   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2739   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2740                                     ConstantPoolCacheEntry::flags_offset());
2741   // access constant pool cache fields
2742   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2743                                     ConstantPoolCacheEntry::f2_offset());
2744 
2745   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2746   resolve_cache_and_index(byte_no, cache, index, index_size);
2747   __ load_resolved_method_at_index(byte_no, method, cache, index);
2748 
2749   if (itable_index != noreg) {
2750     // pick up itable or appendix index from f2 also:
2751     __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2752   }
2753   __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2754 }
2755 
2756 // The cache and index registers are expected to be set before the call.
2757 // Their correct values are preserved.
2758 void TemplateTable::jvmti_post_field_access(Register cache,
2759                                             Register index,
2760                                             bool is_static,
2761                                             bool has_tos) {
2762   if (JvmtiExport::can_post_field_access()) {
2763     // Check to see if a field access watch has been set before we take
2764     // the time to call into the VM.
2765     Label L1;
2766     assert_different_registers(cache, index, rax);
2767     __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2768     __ testl(rax,rax);
2769     __ jcc(Assembler::zero, L1);
2770 
2771     // cache entry pointer
2772     __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2773     __ shll(index, LogBytesPerWord);
2774     __ addptr(cache, index);
2775     if (is_static) {
2776       __ xorptr(rax, rax);      // NULL object reference
2777     } else {
2778       __ pop(atos);         // Get the object
2779       __ verify_oop(rax);
2780       __ push(atos);        // Restore stack state
2781     }
2782     // rax:    object pointer or NULL
2783     // cache: cache entry pointer
2784     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2785                rax, cache);
2786     __ get_cache_and_index_at_bcp(cache, index, 1);
2787     __ bind(L1);
2788   }
2789 }
2790 
2791 void TemplateTable::pop_and_check_object(Register r) {
2792   __ pop_ptr(r);
2793   __ null_check(r);  // for field access must check obj.
2794   __ verify_oop(r);
2795 }
2796 
2797 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2798   transition(vtos, vtos);
2799 
2800   const Register cache = rcx;
2801   const Register index = rdx;
2802   const Register obj   = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2803   const Register off   = rbx;
2804   const Register flags = rax;
2805   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
2806 
2807   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2808   jvmti_post_field_access(cache, index, is_static, false);
2809   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2810 
2811   if (!is_static) pop_and_check_object(obj);
2812 
2813   const Address field(obj, off, Address::times_1, 0*wordSize);
2814 
2815   Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
2816 
2817   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2818   // Make sure we don't need to mask the flags register after the above shift
2819   assert(btos == 0, "change code, btos != 0");
2820 
2821   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
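       // Illustrative: the tos state is encoded in the topmost bits of the
       // cached flags word; after the shift and mask, flags holds the small
       // TosState value (btos == 0, so the notZero test below dispatches
       // every non-byte state).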
2822 
2823   __ jcc(Assembler::notZero, notByte);
2824   // btos
2825   __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
2826   __ push(btos);
2827   // Rewrite bytecode to be faster
2828   if (!is_static && rc == may_rewrite) {
2829     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2830   }
2831   __ jmp(Done);
2832 
2833   __ bind(notByte);
2834   __ cmpl(flags, ztos);
2835   __ jcc(Assembler::notEqual, notBool);
2836 
2837   // ztos (same code as btos)
2838   __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
2839   __ push(ztos);
2840   // Rewrite bytecode to be faster
2841   if (!is_static && rc == may_rewrite) {
2842     // use btos rewriting; no truncation to the t/f bit is needed for getfield.
2843     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2844   }
2845   __ jmp(Done);
2846 
2847   __ bind(notBool);
2848   __ cmpl(flags, atos);
2849   __ jcc(Assembler::notEqual, notObj);
2850   // atos
2851   do_oop_load(_masm, field, rax);
2852   __ push(atos);
2853   if (!is_static && rc == may_rewrite) {
2854     patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2855   }
2856   __ jmp(Done);
2857 
2858   __ bind(notObj);
2859   __ cmpl(flags, itos);
2860   __ jcc(Assembler::notEqual, notInt);
2861   // itos
2862   __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
2863   __ push(itos);
2864   // Rewrite bytecode to be faster
2865   if (!is_static && rc == may_rewrite) {
2866     patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2867   }
2868   __ jmp(Done);
2869 
2870   __ bind(notInt);
2871   __ cmpl(flags, ctos);
2872   __ jcc(Assembler::notEqual, notChar);
2873   // ctos
2874   __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
2875   __ push(ctos);
2876   // Rewrite bytecode to be faster
2877   if (!is_static && rc == may_rewrite) {
2878     patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2879   }
2880   __ jmp(Done);
2881 
2882   __ bind(notChar);
2883   __ cmpl(flags, stos);
2884   __ jcc(Assembler::notEqual, notShort);
2885   // stos
2886   __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
2887   __ push(stos);
2888   // Rewrite bytecode to be faster
2889   if (!is_static && rc == may_rewrite) {
2890     patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2891   }
2892   __ jmp(Done);
2893 
2894   __ bind(notShort);
2895   __ cmpl(flags, ltos);
2896   __ jcc(Assembler::notEqual, notLong);
2897   // ltos
2898   // Generate code as if volatile (x86_32).  There just aren't enough registers to
2899   // save that information and this code is faster than the test.
2900   __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
2901   __ push(ltos);
2902   // Rewrite bytecode to be faster
2903   LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
2904   __ jmp(Done);
2905 
2906   __ bind(notLong);
2907   __ cmpl(flags, ftos);
2908   __ jcc(Assembler::notEqual, notFloat);
2909   // ftos
2910 
2911   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2912   __ push(ftos);
2913   // Rewrite bytecode to be faster
2914   if (!is_static && rc == may_rewrite) {
2915     patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2916   }
2917   __ jmp(Done);
2918 
2919   __ bind(notFloat);
2920 #ifdef ASSERT
2921   Label notDouble;
2922   __ cmpl(flags, dtos);
2923   __ jcc(Assembler::notEqual, notDouble);
2924 #endif
2925   // dtos
2926   // MO_RELAXED: even for a volatile field this adds no extra work for the underlying implementation
2927   __ access_load_at(T_DOUBLE, IN_HEAP | MO_RELAXED, noreg /* dtos */, field, noreg, noreg);
2928   __ push(dtos);
2929   // Rewrite bytecode to be faster
2930   if (!is_static && rc == may_rewrite) {
2931     patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2932   }
2933 #ifdef ASSERT
2934   __ jmp(Done);
2935 
2936   __ bind(notDouble);
2937   __ stop("Bad state");
2938 #endif
2939 
2940   __ bind(Done);
2941   // [jk] not needed currently
2942   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2943   //                                              Assembler::LoadStore));
2944 }
2945 
2946 void TemplateTable::getfield(int byte_no) {
2947   getfield_or_static(byte_no, false);
2948 }
2949 
2950 void TemplateTable::nofast_getfield(int byte_no) {
2951   getfield_or_static(byte_no, false, may_not_rewrite);
2952 }
2953 
2954 void TemplateTable::getstatic(int byte_no) {
2955   getfield_or_static(byte_no, true);
2956 }
2957 
2958 
2959 // The cache and index registers are expected to be set before the call.
2960 // The function may destroy various registers, but not the cache and index registers.
2961 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2962 
2963   const Register robj = LP64_ONLY(c_rarg2)   NOT_LP64(rax);
2964   const Register RBX  = LP64_ONLY(c_rarg1)   NOT_LP64(rbx);
2965   const Register RCX  = LP64_ONLY(c_rarg3)   NOT_LP64(rcx);
2966   const Register RDX  = LP64_ONLY(rscratch1) NOT_LP64(rdx);
2967 
2968   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2969 
2970   if (JvmtiExport::can_post_field_modification()) {
2971     // Check to see if a field modification watch has been set before
2972     // we take the time to call into the VM.
2973     Label L1;
2974     assert_different_registers(cache, index, rax);
2975     __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2976     __ testl(rax, rax);
2977     __ jcc(Assembler::zero, L1);
2978 
2979     __ get_cache_and_index_at_bcp(robj, RDX, 1);
2980 
2981 
2982     if (is_static) {
2983       // Life is simple.  Null out the object pointer.
2984       __ xorl(RBX, RBX);
2985 
2986     } else {
2987       // Life is harder. The stack holds the value on top, followed by
2988       // the object.  We don't know the size of the value, though; it
2989       // could be one or two words depending on its type. As a result,
2990       // we must find the type to determine where the object is.
2991 #ifndef _LP64
2992       Label two_word, valsize_known;
2993 #endif
2994       __ movl(RCX, Address(robj, RDX,
2995                            Address::times_ptr,
2996                            in_bytes(cp_base_offset +
2997                                      ConstantPoolCacheEntry::flags_offset())));
2998       NOT_LP64(__ mov(rbx, rsp));
2999       __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);
3000 
3001       // Make sure we don't need to mask rcx after the above shift
3002       ConstantPoolCacheEntry::verify_tos_state_shift();
3003 #ifdef _LP64
3004       __ movptr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
3005       __ cmpl(c_rarg3, ltos);
3006       __ cmovptr(Assembler::equal,
3007                  c_rarg1, at_tos_p2()); // ltos (two word jvalue)
3008       __ cmpl(c_rarg3, dtos);
3009       __ cmovptr(Assembler::equal,
3010                  c_rarg1, at_tos_p2()); // dtos (two word jvalue)
3011 #else
3012       __ cmpl(rcx, ltos);
3013       __ jccb(Assembler::equal, two_word);
3014       __ cmpl(rcx, dtos);
3015       __ jccb(Assembler::equal, two_word);
3016       __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
3017       __ jmpb(valsize_known);
3018 
3019       __ bind(two_word);
3020       __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
3021 
3022       __ bind(valsize_known);
3023       // setup object pointer
3024       __ movptr(rbx, Address(rbx, 0));
3025 #endif
3026     }
3027     // cache entry pointer
3028     __ addptr(robj, in_bytes(cp_base_offset));
3029     __ shll(RDX, LogBytesPerWord);
3030     __ addptr(robj, RDX);
3031     // object (tos)
3032     __ mov(RCX, rsp);
3033     // c_rarg1: object pointer set up above (NULL if static)
3034     // c_rarg2: cache entry pointer
3035     // c_rarg3: jvalue object on the stack
3036     __ call_VM(noreg,
3037                CAST_FROM_FN_PTR(address,
3038                                 InterpreterRuntime::post_field_modification),
3039                RBX, robj, RCX);
3040     __ get_cache_and_index_at_bcp(cache, index, 1);
3041     __ bind(L1);
3042   }
3043 }
3044 
3045 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3046   transition(vtos, vtos);
3047 
3048   const Register cache = rcx;
3049   const Register index = rdx;
3050   const Register obj   = rcx;
3051   const Register off   = rbx;
3052   const Register flags = rax;
3053 
3054   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
3055   jvmti_post_field_mod(cache, index, is_static);
3056   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
3057 
3058   // [jk] not needed currently
3059   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3060   //                                              Assembler::StoreStore));
3061 
3062   Label notVolatile, Done;
3063   __ movl(rdx, flags);
3064   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3065   __ andl(rdx, 0x1);
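       // Illustrative: is_volatile is a single bit in the cached flags word,
       // so rdx is now 1 iff the field was declared volatile.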
3066 
3067   // Check for volatile store
3068   __ testl(rdx, rdx);
3069   __ jcc(Assembler::zero, notVolatile);
3070 
3071   putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
3072   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3073                                                Assembler::StoreStore));
3074   __ jmp(Done);
3075   __ bind(notVolatile);
3076 
3077   putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
3078 
3079   __ bind(Done);
3080 }
3081 
3082 void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc,
3083                                               Register obj, Register off, Register flags) {
3084 
3085   // field addresses
3086   const Address field(obj, off, Address::times_1, 0*wordSize);
3087   NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3088 
3089   Label notByte, notBool, notInt, notShort, notChar,
3090         notLong, notFloat, notObj;
3091   Label Done;
3092 
3093   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3094 
3095   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3096 
3097   assert(btos == 0, "change code, btos != 0");
3098   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
3099   __ jcc(Assembler::notZero, notByte);
3100 
3101   // btos
3102   {
3103     __ pop(btos);
3104     if (!is_static) pop_and_check_object(obj);
3105     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
3106     if (!is_static && rc == may_rewrite) {
3107       patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3108     }
3109     __ jmp(Done);
3110   }
3111 
3112   __ bind(notByte);
3113   __ cmpl(flags, ztos);
3114   __ jcc(Assembler::notEqual, notBool);
3115 
3116   // ztos
3117   {
3118     __ pop(ztos);
3119     if (!is_static) pop_and_check_object(obj);
3120     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
3121     if (!is_static && rc == may_rewrite) {
3122       patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
3123     }
3124     __ jmp(Done);
3125   }
3126 
3127   __ bind(notBool);
3128   __ cmpl(flags, atos);
3129   __ jcc(Assembler::notEqual, notObj);
3130 
3131   // atos
3132   {
3133     __ pop(atos);
3134     if (!is_static) pop_and_check_object(obj);
3135     // Store into the field
3136     do_oop_store(_masm, field, rax);
3137     if (!is_static && rc == may_rewrite) {
3138       patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3139     }
3140     __ jmp(Done);
3141   }
3142 
3143   __ bind(notObj);
3144   __ cmpl(flags, itos);
3145   __ jcc(Assembler::notEqual, notInt);
3146 
3147   // itos
3148   {
3149     __ pop(itos);
3150     if (!is_static) pop_and_check_object(obj);
3151     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
3152     if (!is_static && rc == may_rewrite) {
3153       patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3154     }
3155     __ jmp(Done);
3156   }
3157 
3158   __ bind(notInt);
3159   __ cmpl(flags, ctos);
3160   __ jcc(Assembler::notEqual, notChar);
3161 
3162   // ctos
3163   {
3164     __ pop(ctos);
3165     if (!is_static) pop_and_check_object(obj);
3166     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
3167     if (!is_static && rc == may_rewrite) {
3168       patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3169     }
3170     __ jmp(Done);
3171   }
3172 
3173   __ bind(notChar);
3174   __ cmpl(flags, stos);
3175   __ jcc(Assembler::notEqual, notShort);
3176 
3177   // stos
3178   {
3179     __ pop(stos);
3180     if (!is_static) pop_and_check_object(obj);
3181     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
3182     if (!is_static && rc == may_rewrite) {
3183       patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3184     }
3185     __ jmp(Done);
3186   }
3187 
3188   __ bind(notShort);
3189   __ cmpl(flags, ltos);
3190   __ jcc(Assembler::notEqual, notLong);
3191 
3192   // ltos
3193   {
3194     __ pop(ltos);
3195     if (!is_static) pop_and_check_object(obj);
3196     // MO_RELAXED suffices: the store is still atomic, which is what a volatile long field needs (important for x86_32)
3197     __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos */, noreg, noreg);
3198 #ifdef _LP64
3199     if (!is_static && rc == may_rewrite) {
3200       patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3201     }
3202 #endif // _LP64
3203     __ jmp(Done);
3204   }
3205 
3206   __ bind(notLong);
3207   __ cmpl(flags, ftos);
3208   __ jcc(Assembler::notEqual, notFloat);
3209 
3210   // ftos
3211   {
3212     __ pop(ftos);
3213     if (!is_static) pop_and_check_object(obj);
3214     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
3215     if (!is_static && rc == may_rewrite) {
3216       patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3217     }
3218     __ jmp(Done);
3219   }
3220 
3221   __ bind(notFloat);
3222 #ifdef ASSERT
3223   Label notDouble;
3224   __ cmpl(flags, dtos);
3225   __ jcc(Assembler::notEqual, notDouble);
3226 #endif
3227 
3228   // dtos
3229   {
3230     __ pop(dtos);
3231     if (!is_static) pop_and_check_object(obj);
3232     // MO_RELAXED suffices: even for a volatile field it adds no extra work for the underlying implementation
3233     __ access_store_at(T_DOUBLE, IN_HEAP | MO_RELAXED, field, noreg /* dtos */, noreg, noreg);
3234     if (!is_static && rc == may_rewrite) {
3235       patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3236     }
3237   }
3238 
3239 #ifdef ASSERT
3240   __ jmp(Done);
3241 
3242   __ bind(notDouble);
3243   __ stop("Bad state");
3244 #endif
3245 
3246   __ bind(Done);
3247 }
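     // Editor's note (illustrative, using only constants referenced above):
     // the compare chain in putfield_or_static_helper dispatches on the tos
     // state packed into the flags word, i.e.
     //
     //   static int tos_state_of(jint flags) {
     //     return (flags >> ConstantPoolCacheEntry::tos_state_shift) &
     //            ConstantPoolCacheEntry::tos_state_mask;
     //   }
     //
     // btos is handled first because it is encoded as 0, so one andl/jcc
     // pair suffices (see the assert(btos == 0, ...) above).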
3248 
3249 void TemplateTable::putfield(int byte_no) {
3250   putfield_or_static(byte_no, false);
3251 }
3252 
3253 void TemplateTable::nofast_putfield(int byte_no) {
3254   putfield_or_static(byte_no, false, may_not_rewrite);
3255 }
3256 
3257 void TemplateTable::putstatic(int byte_no) {
3258   putfield_or_static(byte_no, true);
3259 }
3260 
3261 void TemplateTable::jvmti_post_fast_field_mod() {
3262 
3263   const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3264 
3265   if (JvmtiExport::can_post_field_modification()) {
3266     // Check to see if a field modification watch has been set before
3267     // we take the time to call into the VM.
3268     Label L2;
3269     __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3270     __ testl(scratch, scratch);
3271     __ jcc(Assembler::zero, L2);
3272     __ pop_ptr(rbx);                  // copy the object pointer from tos
3273     __ verify_oop(rbx);
3274     __ push_ptr(rbx);                 // put the object pointer back on tos
3275     // Save tos values before call_VM() clobbers them. Since we have
3276     // to do it for every data type, we use the saved values as the
3277     // jvalue object.
3278     switch (bytecode()) {          // load values into the jvalue object
3279     case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3280     case Bytecodes::_fast_bputfield: // fall through
3281     case Bytecodes::_fast_zputfield: // fall through
3282     case Bytecodes::_fast_sputfield: // fall through
3283     case Bytecodes::_fast_cputfield: // fall through
3284     case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3285     case Bytecodes::_fast_dputfield: __ push(dtos); break;
3286     case Bytecodes::_fast_fputfield: __ push(ftos); break;
3287     case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3288 
3289     default:
3290       ShouldNotReachHere();
3291     }
3292     __ mov(scratch, rsp);             // points to jvalue on the stack
3293     // access constant pool cache entry
3294     LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1));
3295     NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1));
3296     __ verify_oop(rbx);
3297     // rbx: object pointer copied above
3298     // c_rarg2: cache entry pointer
3299     // c_rarg3: jvalue object on the stack
3300     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3301     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3302 
3303     switch (bytecode()) {             // restore tos values
3304     case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3305     case Bytecodes::_fast_bputfield: // fall through
3306     case Bytecodes::_fast_zputfield: // fall through
3307     case Bytecodes::_fast_sputfield: // fall through
3308     case Bytecodes::_fast_cputfield: // fall through
3309     case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3310     case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3311     case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3312     case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3313     default: break;
3314     }
3315     __ bind(L2);
3316   }
3317 }
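     // Editor's note: the "jvalue object" passed to post_field_modification
     // above is just the saved tos value on the expression stack viewed
     // through a pointer. For reference, jvalue (from jni.h) is the union
     //
     //   typedef union jvalue {
     //     jboolean z; jbyte b; jchar c; jshort s;
     //     jint i; jlong j; jfloat f; jdouble d; jobject l;
     //   } jvalue;
     //
     // so pushing the value and passing rsp yields a well-formed jvalue*.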
3318 
3319 void TemplateTable::fast_storefield(TosState state) {
3320   transition(state, vtos);
3321 
3322   ByteSize base = ConstantPoolCache::base_offset();
3323 
3324   jvmti_post_fast_field_mod();
3325 
3326   // access constant pool cache
3327   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3328 
3329   // test for volatile with rdx; note that rdx is also the tos register for lputfield.
3330   __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3331                        in_bytes(base +
3332                                 ConstantPoolCacheEntry::flags_offset())));
3333 
3334   // replace index with field offset from cache entry
3335   __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3336                          in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3337 
3338   // [jk] not needed currently
3339   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3340   //                                              Assembler::StoreStore));
3341 
3342   Label notVolatile, Done;
3343   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3344   __ andl(rdx, 0x1);
3345 
3346   // Get object from stack
3347   pop_and_check_object(rcx);
3348 
3349   // field address
3350   const Address field(rcx, rbx, Address::times_1);
3351 
3352   // Check for volatile store
3353   __ testl(rdx, rdx);
3354   __ jcc(Assembler::zero, notVolatile);
3355 
3356   fast_storefield_helper(field, rax);
3357   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3358                                                Assembler::StoreStore));
3359   __ jmp(Done);
3360   __ bind(notVolatile);
3361 
3362   fast_storefield_helper(field, rax);
3363 
3364   __ bind(Done);
3365 }
3366 
3367 void TemplateTable::fast_storefield_helper(Address field, Register rax) {
3368 
3369   // access field
3370   switch (bytecode()) {
3371   case Bytecodes::_fast_aputfield:
3372     do_oop_store(_masm, field, rax);
3373     break;
3374   case Bytecodes::_fast_lputfield:
3375 #ifdef _LP64
3376     __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
3377 #else
3378     __ stop("should not be rewritten");
3379 #endif
3380     break;
3381   case Bytecodes::_fast_iputfield:
3382     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
3383     break;
3384   case Bytecodes::_fast_zputfield:
3385     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
3386     break;
3387   case Bytecodes::_fast_bputfield:
3388     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
3389     break;
3390   case Bytecodes::_fast_sputfield:
3391     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
3392     break;
3393   case Bytecodes::_fast_cputfield:
3394     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
3395     break;
3396   case Bytecodes::_fast_fputfield:
3397     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
3398     break;
3399   case Bytecodes::_fast_dputfield:
3400     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
3401     break;
3402   default:
3403     ShouldNotReachHere();
3404   }
3405 }
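     // Editor's note: _fast_lputfield is rewritten on LP64 only; x86_32
     // cannot store 64 bits with one plain move, so putfield of a long is
     // never patched to the fast bytecode there (see the #ifdef _LP64 around
     // the lputfield patch_bytecode in putfield_or_static_helper). The
     // __ stop("should not be rewritten") above guards that invariant.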
3406 
3407 void TemplateTable::fast_accessfield(TosState state) {
3408   transition(atos, state);
3409 
3410   // Do the JVMTI work here to avoid disturbing the register state below
3411   if (JvmtiExport::can_post_field_access()) {
3412     // Check to see if a field access watch has been set before we
3413     // take the time to call into the VM.
3414     Label L1;
3415     __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3416     __ testl(rcx, rcx);
3417     __ jcc(Assembler::zero, L1);
3418     // access constant pool cache entry
3419     LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1));
3420     NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1));
3421     __ verify_oop(rax);
3422     __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
3423     LP64_ONLY(__ mov(c_rarg1, rax));
3424     // c_rarg1: object pointer copied above
3425     // c_rarg2: cache entry pointer
3426     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
3427     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
3428     __ pop_ptr(rax); // restore object pointer
3429     __ bind(L1);
3430   }
3431 
3432   // access constant pool cache
3433   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3434   // replace index with field offset from cache entry
3435   // [jk] not needed currently
3436   // __ movl(rdx, Address(rcx, rbx, Address::times_8,
3437   //                      in_bytes(ConstantPoolCache::base_offset() +
3438   //                               ConstantPoolCacheEntry::flags_offset())));
3439   // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3440   // __ andl(rdx, 0x1);
3441   //
3442   __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3443                          in_bytes(ConstantPoolCache::base_offset() +
3444                                   ConstantPoolCacheEntry::f2_offset())));
3445 
3446   // rax: object
3447   __ verify_oop(rax);
3448   __ null_check(rax);
3449   Address field(rax, rbx, Address::times_1);
3450 
3451   // access field
3452   switch (bytecode()) {
3453   case Bytecodes::_fast_agetfield:
3454     do_oop_load(_masm, field, rax);
3455     __ verify_oop(rax);
3456     break;
3457   case Bytecodes::_fast_lgetfield:
3458 #ifdef _LP64
3459     __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
3460 #else
3461     __ stop("should not be rewritten");
3462 #endif
3463     break;
3464   case Bytecodes::_fast_igetfield:
3465     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3466     break;
3467   case Bytecodes::_fast_bgetfield:
3468     __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3469     break;
3470   case Bytecodes::_fast_sgetfield:
3471     __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3472     break;
3473   case Bytecodes::_fast_cgetfield:
3474     __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3475     break;
3476   case Bytecodes::_fast_fgetfield:
3477     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3478     break;
3479   case Bytecodes::_fast_dgetfield:
3480     __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3481     break;
3482   default:
3483     ShouldNotReachHere();
3484   }
3485   // [jk] not needed currently
3486   //   Label notVolatile;
3487   //   __ testl(rdx, rdx);
3488   //   __ jcc(Assembler::zero, notVolatile);
3489   //   __ membar(Assembler::LoadLoad);
3490   //   __ bind(notVolatile);
3491 }
3492 
3493 void TemplateTable::fast_xaccess(TosState state) {
3494   transition(vtos, state);
3495 
3496   // get receiver
3497   __ movptr(rax, aaddress(0));
3498   // access constant pool cache
3499   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
3500   __ movptr(rbx,
3501             Address(rcx, rdx, Address::times_ptr,
3502                     in_bytes(ConstantPoolCache::base_offset() +
3503                              ConstantPoolCacheEntry::f2_offset())));
3504   // make sure exception is reported in correct bcp range (getfield is
3505   // next instruction)
3506   __ increment(rbcp);
3507   __ null_check(rax);
3508   const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
3509   switch (state) {
3510   case itos:
3511     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3512     break;
3513   case atos:
3514     do_oop_load(_masm, field, rax);
3515     __ verify_oop(rax);
3516     break;
3517   case ftos:
3518     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3519     break;
3520   default:
3521     ShouldNotReachHere();
3522   }
3523 
3524   // [jk] not needed currently
3525   // Label notVolatile;
3526   // __ movl(rdx, Address(rcx, rdx, Address::times_8,
3527   //                      in_bytes(ConstantPoolCache::base_offset() +
3528   //                               ConstantPoolCacheEntry::flags_offset())));
3529   // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3530   // __ testl(rdx, 0x1);
3531   // __ jcc(Assembler::zero, notVolatile);
3532   // __ membar(Assembler::LoadLoad);
3533   // __ bind(notVolatile);
3534 
3535   __ decrement(rbcp);
3536 }
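     // Editor's note: fast_xaccess implements the fused _fast_Xaccess_0
     // bytecodes (aload_0 immediately followed by a fast getfield of an int,
     // object or float). The increment/decrement of rbcp above brackets the
     // null check so that an implicit NullPointerException is reported at
     // the bcp of the getfield, not of the preceding aload_0.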
3537 
3538 //-----------------------------------------------------------------------------
3539 // Calls
3540 
3541 void TemplateTable::prepare_invoke(int byte_no,
3542                                    Register method,  // linked method (or i-klass)
3543                                    Register index,   // itable index, MethodType, etc.
3544                                    Register recv,    // if caller wants to see it
3545                                    Register flags    // if caller wants to test it
3546                                    ) {
3547   // determine flags
3548   const Bytecodes::Code code = bytecode();
3549   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3550   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3551   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3552   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3553   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3554   const bool load_receiver       = (recv  != noreg);
3555   const bool save_flags          = (flags != noreg);
3556   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3557   assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3558   assert(flags == noreg || flags == rdx, "");
3559   assert(recv  == noreg || recv  == rcx, "");
3560 
3561   // setup registers & access constant pool cache
3562   if (recv  == noreg)  recv  = rcx;
3563   if (flags == noreg)  flags = rdx;
3564   assert_different_registers(method, index, recv, flags);
3565 
3566   // save 'interpreter return address'
3567   __ save_bcp();
3568 
3569   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3570 
3571   // maybe push appendix to arguments (just before return address)
3572   if (is_invokedynamic || is_invokehandle) {
3573     Label L_no_push;
3574     __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
3575     __ jcc(Assembler::zero, L_no_push);
3576     // Push the appendix as a trailing parameter.
3577     // This must be done before we get the receiver,
3578     // since the parameter_size includes it.
3579     __ push(rbx);
3580     __ mov(rbx, index);
3581     __ load_resolved_reference_at_index(index, rbx);
3582     __ pop(rbx);
3583     __ push(index);  // push appendix (MethodType, CallSite, etc.)
3584     __ bind(L_no_push);
3585   }
3586 
3587   // load receiver if needed (after appendix is pushed so parameter size is correct)
3588   // Note: no return address pushed yet
3589   if (load_receiver) {
3590     __ movl(recv, flags);
3591     __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
3592     const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
3593     const int receiver_is_at_end      = -1;  // back off one slot to get receiver
3594     Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3595     __ movptr(recv, recv_addr);
3596     __ verify_oop(recv);
3597   }
3598 
3599   if (save_flags) {
3600     __ movl(rbcp, flags);
3601   }
3602 
3603   // compute return type
3604   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3605   // Make sure we don't need to mask flags after the above shift
3606   ConstantPoolCacheEntry::verify_tos_state_shift();
3607   // load return address
3608   {
3609     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3610     ExternalAddress table(table_addr);
3611     LP64_ONLY(__ lea(rscratch1, table));
3612     LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr)));
3613     NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))));
3614   }
3615 
3616   // push return address
3617   __ push(flags);
3618 
3619   // Restore the flags value stashed in rbcp above, and reload the
3620   // bytecode pointer (rsi on x86_32, r13 on x86_64) for later null checks.
3621   if (save_flags) {
3622     __ movl(flags, rbcp);
3623     __ restore_bcp();
3624   }
3625 }
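     // Editor's sketch (illustrative only): the receiver load in
     // prepare_invoke derives an expression-stack address from the parameter
     // size packed into the low bits of flags, roughly
     //
     //   int param_size = flags & ConstantPoolCacheEntry::parameter_size_mask;
     //   // receiver sits (param_size - 1) slots below the stack top; no
     //   // return address has been pushed yet, hence the two -1 corrections.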
3626 
3627 void TemplateTable::invokevirtual_helper(Register index,
3628                                          Register recv,
3629                                          Register flags) {
3630   // Uses temporary registers rax, rdx
3631   assert_different_registers(index, recv, rax, rdx);
3632   assert(index == rbx, "");
3633   assert(recv  == rcx, "");
3634 
3635   // Test for an invoke of a final method
3636   Label notFinal;
3637   __ movl(rax, flags);
3638   __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3639   __ jcc(Assembler::zero, notFinal);
3640 
3641   const Register method = index;  // method must be rbx
3642   assert(method == rbx,
3643          "Method* must be rbx for interpreter calling convention");
3644 
3645   // do the call - the index is actually the method to call
3646   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3647 
3648   // It's final, need a null check here!
3649   __ null_check(recv);
3650 
3651   // profile this call
3652   __ profile_final_call(rax);
3653   __ profile_arguments_type(rax, method, rbcp, true);
3654 
3655   __ jump_from_interpreted(method, rax);
3656 
3657   __ bind(notFinal);
3658 
3659   // get receiver klass
3660   __ null_check(recv, oopDesc::klass_offset_in_bytes());
3661   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3662   __ load_klass(rax, recv, tmp_load_klass);
3663 
3664   // profile this call
3665   __ profile_virtual_call(rax, rlocals, rdx);
3666   // get target Method* & entry point
3667   __ lookup_virtual_method(rax, index, method);
3668 
3669   __ profile_arguments_type(rdx, method, rbcp, true);
3670   __ jump_from_interpreted(method, rdx);
3671 }
3672 
3673 void TemplateTable::invokevirtual(int byte_no) {
3674   transition(vtos, vtos);
3675   assert(byte_no == f2_byte, "use this argument");
3676   prepare_invoke(byte_no,
3677                  rbx,    // method or vtable index
3678                  noreg,  // unused itable index
3679                  rcx, rdx); // recv, flags
3680 
3681   // rbx: index
3682   // rcx: receiver
3683   // rdx: flags
3684 
3685   invokevirtual_helper(rbx, rcx, rdx);
3686 }
3687 
3688 void TemplateTable::invokespecial(int byte_no) {
3689   transition(vtos, vtos);
3690   assert(byte_no == f1_byte, "use this argument");
3691   prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
3692                  rcx);  // get receiver also for null check
3693   __ verify_oop(rcx);
3694   __ null_check(rcx);
3695   // do the call
3696   __ profile_call(rax);
3697   __ profile_arguments_type(rax, rbx, rbcp, false);
3698   __ jump_from_interpreted(rbx, rax);
3699 }
3700 
3701 void TemplateTable::invokestatic(int byte_no) {
3702   transition(vtos, vtos);
3703   assert(byte_no == f1_byte, "use this argument");
3704   prepare_invoke(byte_no, rbx);  // get f1 Method*
3705   // do the call
3706   __ profile_call(rax);
3707   __ profile_arguments_type(rax, rbx, rbcp, false);
3708   __ jump_from_interpreted(rbx, rax);
3709 }
3710 
3711 
3712 void TemplateTable::fast_invokevfinal(int byte_no) {
3713   transition(vtos, vtos);
3714   assert(byte_no == f2_byte, "use this argument");
3715   __ stop("fast_invokevfinal not used on x86");
3716 }
3717 
3718 
3719 void TemplateTable::invokeinterface(int byte_no) {
3720   transition(vtos, vtos);
3721   assert(byte_no == f1_byte, "use this argument");
3722   prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 Method*
3723                  rcx, rdx); // recv, flags
3724 
3725   // rax: reference klass (from f1) if interface method
3726   // rbx: method (from f2)
3727   // rcx: receiver
3728   // rdx: flags
3729 
3730   // First check for Object case, then private interface method,
3731   // then regular interface method.
3732 
3733   // Special case of invokeinterface called for virtual method of
3734   // java.lang.Object.  See cpCache.cpp for details.
3735   Label notObjectMethod;
3736   __ movl(rlocals, rdx);
3737   __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3738   __ jcc(Assembler::zero, notObjectMethod);
3739   invokevirtual_helper(rbx, rcx, rdx);
3740   // no return from above
3741   __ bind(notObjectMethod);
3742 
3743   Label no_such_interface; // for receiver subtype check
3744   Register recvKlass; // used for exception processing
3745 
3746   // Check for private method invocation - indicated by vfinal
3747   Label notVFinal;
3748   __ movl(rlocals, rdx);
3749   __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3750   __ jcc(Assembler::zero, notVFinal);
3751 
3752   // Get receiver klass into rlocals - also a null check
3753   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3754   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3755   __ load_klass(rlocals, rcx, tmp_load_klass);
3756 
3757   Label subtype;
3758   __ check_klass_subtype(rlocals, rax, rbcp, subtype);
3759   // If we get here the typecheck failed
3760   recvKlass = rdx;
3761   __ mov(recvKlass, rlocals); // shuffle receiver class for exception use
3762   __ jmp(no_such_interface);
3763 
3764   __ bind(subtype);
3765 
3766   // do the call - rbx is actually the method to call
3767 
3768   __ profile_final_call(rdx);
3769   __ profile_arguments_type(rdx, rbx, rbcp, true);
3770 
3771   __ jump_from_interpreted(rbx, rdx);
3772   // no return from above
3773   __ bind(notVFinal);
3774 
3775   // Get receiver klass into rdx - also a null check
3776   __ restore_locals();  // restore r14
3777   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3778   __ load_klass(rdx, rcx, tmp_load_klass);
3779 
3780   Label no_such_method;
3781 
3782   // Preserve method for throw_AbstractMethodErrorVerbose.
3783   __ mov(rcx, rbx);
3784   // Receiver subtype check against REFC.
3785   // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
3786   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3787                              rdx, rax, noreg,
3788                              // outputs: scan temp. reg, scan temp. reg
3789                              rbcp, rlocals,
3790                              no_such_interface,
3791                              /*return_method=*/false);
3792 
3793   // profile this call
3794   __ restore_bcp(); // rbcp was destroyed by receiver type check
3795   __ profile_virtual_call(rdx, rbcp, rlocals);
3796 
3797   // Get declaring interface class from method, and itable index
3798   __ load_method_holder(rax, rbx);
3799   __ movl(rbx, Address(rbx, Method::itable_index_offset()));
3800   __ subl(rbx, Method::itable_index_max);
3801   __ negl(rbx);
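       // Editor's note: the subl/negl pair computes
       //   rbx = Method::itable_index_max - rbx,
       // recovering the real itable index from the biased value stored in
       // the Method.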
3802 
3803   // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3804   __ mov(rlocals, rdx);
3805   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3806                              rlocals, rax, rbx,
3807                              // outputs: method, scan temp. reg
3808                              rbx, rbcp,
3809                              no_such_interface);
3810 
3811   // rbx: Method* to call
3812   // rcx: receiver
3813   // Check for abstract method error
3814   // Note: This should be done more efficiently via a throw_abstract_method_error
3815   //       interpreter entry point and a conditional jump to it in case of a null
3816   //       method.
3817   __ testptr(rbx, rbx);
3818   __ jcc(Assembler::zero, no_such_method);
3819 
3820   __ profile_arguments_type(rdx, rbx, rbcp, true);
3821 
3822   // do the call
3823   // rcx: receiver
3824   // rbx: Method*
3825   __ jump_from_interpreted(rbx, rdx);
3826   __ should_not_reach_here();
3827 
3828   // exception handling code follows...
3829   // note: must restore interpreter registers to canonical
3830   //       state for exception handling to work correctly!
3831 
3832   __ bind(no_such_method);
3833   // throw exception
3834   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
3835   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
3836   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3837   // Pass arguments for generating a verbose error message.
3838 #ifdef _LP64
3839   recvKlass = c_rarg1;
3840   Register method    = c_rarg2;
3841   if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
3842   if (method != rcx)    { __ movq(method, rcx);    }
3843 #else
3844   recvKlass = rdx;
3845   Register method    = rcx;
3846 #endif
3847   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
3848              recvKlass, method);
3849   // The call_VM checks for exception, so we should never return here.
3850   __ should_not_reach_here();
3851 
3852   __ bind(no_such_interface);
3853   // throw exception
3854   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
3855   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
3856   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3857   // Pass arguments for generating a verbose error message.
3858   LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
3859   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
3860              recvKlass, rax);
3861   // the call_VM checks for exception, so we should never return here.
3862   __ should_not_reach_here();
3863 }
3864 
3865 void TemplateTable::invokehandle(int byte_no) {
3866   transition(vtos, vtos);
3867   assert(byte_no == f1_byte, "use this argument");
3868   const Register rbx_method = rbx;
3869   const Register rax_mtype  = rax;
3870   const Register rcx_recv   = rcx;
3871   const Register rdx_flags  = rdx;
3872 
3873   prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3874   __ verify_method_ptr(rbx_method);
3875   __ verify_oop(rcx_recv);
3876   __ null_check(rcx_recv);
3877 
3878   // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3879   // rbx: MH.invokeExact_MT method (from f2)
3880 
3881   // Note:  rax_mtype is already pushed (if necessary) by prepare_invoke
3882 
3883   // FIXME: profile the LambdaForm also
3884   __ profile_final_call(rax);
3885   __ profile_arguments_type(rdx, rbx_method, rbcp, true);
3886 
3887   __ jump_from_interpreted(rbx_method, rdx);
3888 }
3889 
3890 void TemplateTable::invokedynamic(int byte_no) {
3891   transition(vtos, vtos);
3892   assert(byte_no == f1_byte, "use this argument");
3893 
3894   const Register rbx_method   = rbx;
3895   const Register rax_callsite = rax;
3896 
3897   prepare_invoke(byte_no, rbx_method, rax_callsite);
3898 
3899   // rax: CallSite object (from cpool->resolved_references[f1])
3900   // rbx: MH.linkToCallSite method (from f2)
3901 
3902   // Note:  rax_callsite is already pushed by prepare_invoke
3903 
3904   // %%% should make a type profile for any invokedynamic that takes a ref argument
3905   // profile this call
3906   __ profile_call(rbcp);
3907   __ profile_arguments_type(rdx, rbx_method, rbcp, false);
3908 
3909   __ verify_oop(rax_callsite);
3910 
3911   __ jump_from_interpreted(rbx_method, rdx);
3912 }
3913 
3914 //-----------------------------------------------------------------------------
3915 // Allocation
3916 
3917 void TemplateTable::_new() {
3918   transition(vtos, atos);
3919   __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3920   Label slow_case;
3921   Label slow_case_no_pop;
3922   Label done;
3923   Label initialize_header;
3924   Label initialize_object;  // including clearing the fields
3925 
3926   __ get_cpool_and_tags(rcx, rax);
3927 
3928   // Make sure the class we're about to instantiate has been resolved.
3929   // This is done before loading the InstanceKlass to be consistent with
3930   // the order in which the constant pool is updated (see ConstantPool::klass_at_put)
3931   const int tags_offset = Array<u1>::base_offset_in_bytes();
3932   __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3933   __ jcc(Assembler::notEqual, slow_case_no_pop);
3934 
3935   // get InstanceKlass
3936   __ load_resolved_klass_at_index(rcx, rcx, rdx);
3937   __ push(rcx);  // save the contents of klass for initializing the header
3938 
3939   // make sure klass is initialized & doesn't have a finalizer:
3940   // first check that klass is fully initialized
3941   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3942   __ jcc(Assembler::notEqual, slow_case);
3943 
3944   // get instance_size in InstanceKlass (scaled to a count of bytes)
3945   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3946   // test to see if it has a finalizer or is malformed in some way
3947   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3948   __ jcc(Assembler::notZero, slow_case);
3949 
3950   // Allocate the instance:
3951   //  If TLAB is enabled:
3952   //    Try to allocate in the TLAB.
3953   //    If fails, go to the slow path.
3954   //  Else If inline contiguous allocations are enabled:
3955   //    Try to allocate in eden.
3956   //    If fails due to heap end, go to slow path.
3957   //
3958   //  If TLAB is enabled OR inline contiguous is enabled:
3959   //    Initialize the allocation.
3960   //    Exit.
3961   //
3962   //  Go to slow path.
3963 
3964   const bool allow_shared_alloc =
3965     Universe::heap()->supports_inline_contig_alloc();
3966 
3967   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
3968 #ifndef _LP64
3969   if (UseTLAB || allow_shared_alloc) {
3970     __ get_thread(thread);
3971   }
3972 #endif // _LP64
3973 
3974   if (UseTLAB) {
3975     __ tlab_allocate(thread, rax, rdx, 0, rcx, rbx, slow_case);
3976     if (ZeroTLAB) {
3977       // the fields have been already cleared
3978       __ jmp(initialize_header);
3979     } else {
3980       // initialize both the header and fields
3981       __ jmp(initialize_object);
3982     }
3983   } else {
3984     // Allocation in the shared Eden, if allowed.
3985     //
3986     // rdx: instance size in bytes
3987     __ eden_allocate(thread, rax, rdx, 0, rbx, slow_case);
3988   }
3989 
3990   // If UseTLAB or allow_shared_alloc is true, the object was created above
3991   // and needs to be initialized. Otherwise, skip and go to the slow path.
3992   if (UseTLAB || allow_shared_alloc) {
3993     // The object fields are initialized before the header.  If the size
3994     // of the fields is zero, go directly to the header initialization.
3995     __ bind(initialize_object);
3996     __ decrement(rdx, sizeof(oopDesc));
3997     __ jcc(Assembler::zero, initialize_header);
3998 
3999     // Initialize topmost object field, divide rdx by 8, check if odd and
4000     // test if zero.
4001     __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
4002     __ shrl(rdx, LogBytesPerLong); // divide by BytesPerLong and set carry flag if odd
4003 
4004     // rdx must have been a multiple of 8
4005 #ifdef ASSERT
4006     // make sure rdx was a multiple of 8
4007     Label L;
4008     // Ignore partial flag stall after shrl() since it is a debug VM
4009     __ jcc(Assembler::carryClear, L);
4010     __ stop("object size is not a multiple of 2 - adjust this code");
4011     __ bind(L);
4012     // rdx must be > 0, no extra check needed here
4013 #endif
4014 
4015     // initialize remaining object fields: rdx was a multiple of 8
4016     { Label loop;
4017     __ bind(loop);
4018     __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
4019     NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
4020     __ decrement(rdx);
4021     __ jcc(Assembler::notZero, loop);
4022     }
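         // Editor's note: each loop iteration above clears one 8-byte chunk:
         // a single 64-bit movptr on LP64, or two 32-bit movptrs (the
         // NOT_LP64 line) on x86_32. rdx counts these chunks, which is why
         // it was shifted right by LogBytesPerLong beforehand.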
4023 
4024     // initialize object header only.
4025     __ bind(initialize_header);
4026     __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
4027               (intptr_t)markWord::prototype().value()); // header
4028     __ pop(rcx);   // get saved klass back in the register.
4029 #ifdef _LP64
4030     __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
4031     __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
4032 #endif
4033     Register tmp_store_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
4034     __ store_klass(rax, rcx, tmp_store_klass);  // klass
4035 
4036     {
4037       SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
4038       // Trigger dtrace event for fastpath
4039       __ push(atos);
4040       __ call_VM_leaf(
4041            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
4042       __ pop(atos);
4043     }
4044 
4045     __ jmp(done);
4046   }
4047 
4048   // slow case
4049   __ bind(slow_case);
4050   __ pop(rcx);   // restore stack pointer to what it was when we came in.
4051   __ bind(slow_case_no_pop);
4052 
4053   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4054   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4055 
4056   __ get_constant_pool(rarg1);
4057   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4058   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
4059   __ verify_oop(rax);
4060 
4061   // continue
4062   __ bind(done);
4063 }
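     // Editor's sketch (simplified; helper names below are hypothetical, not
     // the actual runtime API): the fast path of _new corresponds roughly to
     //
     //   HeapWord* obj = UseTLAB ? tlab_allocate(size) : eden_allocate(size);
     //   if (obj == NULL) goto slow_case;            // InterpreterRuntime::_new
     //   if (!ZeroTLAB)                              // TLAB may be pre-zeroed
     //     clear_fields(obj + sizeof(oopDesc), size - sizeof(oopDesc));
     //   set_mark(obj, markWord::prototype());       // header first ...
     //   set_klass(obj, klass);                      // ... then klass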
4064 
4065 void TemplateTable::newarray() {
4066   transition(itos, atos);
4067   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4068   __ load_unsigned_byte(rarg1, at_bcp(1));
4069   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
4070           rarg1, rax);
4071 }
4072 
4073 void TemplateTable::anewarray() {
4074   transition(itos, atos);
4075 
4076   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4077   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4078 
4079   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4080   __ get_constant_pool(rarg1);
4081   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
4082           rarg1, rarg2, rax);
4083 }
4084 
4085 void TemplateTable::arraylength() {
4086   transition(atos, itos);
4087   __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
4088   __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
4089 }
4090 
4091 void TemplateTable::checkcast() {
4092   transition(atos, atos);
4093   Label done, is_null, ok_is_subtype, quicked, resolved;
4094   __ testptr(rax, rax); // object is in rax
4095   __ jcc(Assembler::zero, is_null);
4096 
4097   // Get cpool & tags index
4098   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4099   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4100   // See if bytecode has already been quicked
4101   __ cmpb(Address(rdx, rbx,
4102                   Address::times_1,
4103                   Array<u1>::base_offset_in_bytes()),
4104           JVM_CONSTANT_Class);
4105   __ jcc(Assembler::equal, quicked);
4106   __ push(atos); // save receiver for result, and for GC
4107   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4108 
4109   // vm_result_2 has metadata result
4110 #ifndef _LP64
4111   // borrow rdi from locals
4112   __ get_thread(rdi);
4113   __ get_vm_result_2(rax, rdi);
4114   __ restore_locals();
4115 #else
4116   __ get_vm_result_2(rax, r15_thread);
4117 #endif
4118 
4119   __ pop_ptr(rdx); // restore receiver
4120   __ jmpb(resolved);
4121 
4122   // Get superklass in rax and subklass in rbx
4123   __ bind(quicked);
4124   __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
4125   __ load_resolved_klass_at_index(rax, rcx, rbx);
4126 
4127   __ bind(resolved);
4128   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
4129   __ load_klass(rbx, rdx, tmp_load_klass);
4130 
4131   // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
4132   // Superklass in rax.  Subklass in rbx.
4133   __ gen_subtype_check(rbx, ok_is_subtype);
4134 
4135   // Come here on failure
4136   __ push_ptr(rdx);
4137   // object is at TOS
4138   __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
4139 
4140   // Come here on success
4141   __ bind(ok_is_subtype);
4142   __ mov(rax, rdx); // Restore object from rdx
4143 
4144   // Collect counts on whether this check-cast sees NULLs a lot or not.
4145   if (ProfileInterpreter) {
4146     __ jmp(done);
4147     __ bind(is_null);
4148     __ profile_null_seen(rcx);
4149   } else {
4150     __ bind(is_null);   // same as 'done'
4151   }
4152   __ bind(done);
4153 }
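     // Editor's note: checkcast and instanceof share the same quickening and
     // gen_subtype_check machinery; they differ only in how the result is
     // delivered. checkcast keeps the object in rax and jumps to the
     // ClassCastException entry on failure, while instanceof (below)
     // materializes 0 or 1 in rax.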
4154 
4155 void TemplateTable::instanceof() {
4156   transition(atos, itos);
4157   Label done, is_null, ok_is_subtype, quicked, resolved;
4158   __ testptr(rax, rax);
4159   __ jcc(Assembler::zero, is_null);
4160 
4161   // Get cpool & tags index
4162   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4163   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4164   // See if bytecode has already been quicked
4165   __ cmpb(Address(rdx, rbx,
4166                   Address::times_1,
4167                   Array<u1>::base_offset_in_bytes()),
4168           JVM_CONSTANT_Class);
4169   __ jcc(Assembler::equal, quicked);
4170 
4171   __ push(atos); // save receiver for result, and for GC
4172   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4173   // vm_result_2 has metadata result
4174 
4175 #ifndef _LP64
4176   // borrow rdi from locals
4177   __ get_thread(rdi);
4178   __ get_vm_result_2(rax, rdi);
4179   __ restore_locals();
4180 #else
4181   __ get_vm_result_2(rax, r15_thread);
4182 #endif
4183 
4184   __ pop_ptr(rdx); // restore receiver
4185   __ verify_oop(rdx);
4186   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
4187   __ load_klass(rdx, rdx, tmp_load_klass);
4188   __ jmpb(resolved);
4189 
4190   // Get superklass in rax and subklass in rdx
4191   __ bind(quicked);
4192   __ load_klass(rdx, rax, tmp_load_klass);
4193   __ load_resolved_klass_at_index(rax, rcx, rbx);
4194 
4195   __ bind(resolved);
4196 
4197   // Generate subtype check.  Blows rcx, rdi
4198   // Superklass in rax.  Subklass in rdx.
4199   __ gen_subtype_check(rdx, ok_is_subtype);
4200 
4201   // Come here on failure
4202   __ xorl(rax, rax);
4203   __ jmpb(done);
4204   // Come here on success
4205   __ bind(ok_is_subtype);
4206   __ movl(rax, 1);
4207 
4208   // Collect counts on whether this test sees NULLs a lot or not.
4209   if (ProfileInterpreter) {
4210     __ jmp(done);
4211     __ bind(is_null);
4212     __ profile_null_seen(rcx);
4213   } else {
4214     __ bind(is_null);   // same as 'done'
4215   }
4216   __ bind(done);
4217   // rax = 0: obj == NULL or  obj is not an instance of the specified klass
4218   // rax = 1: obj != NULL and obj is     an instance of the specified klass
4219 }
4220 
4221 
4222 //----------------------------------------------------------------------------------------------------
4223 // Breakpoints
4224 void TemplateTable::_breakpoint() {
4225   // Note: We get here even if we are single stepping.
4226   // jbug insists on setting breakpoints at every bytecode
4227   // even if we are in single step mode.
4228 
4229   transition(vtos, vtos);
4230 
4231   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4232 
4233   // get the unpatched byte code
4234   __ get_method(rarg);
4235   __ call_VM(noreg,
4236              CAST_FROM_FN_PTR(address,
4237                               InterpreterRuntime::get_original_bytecode_at),
4238              rarg, rbcp);
4239   __ mov(rbx, rax);  // why?
4240 
4241   // post the breakpoint event
4242   __ get_method(rarg);
4243   __ call_VM(noreg,
4244              CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4245              rarg, rbcp);
4246 
4247   // complete the execution of original bytecode
4248   __ dispatch_only_normal(vtos);
4249 }
4250 
4251 //-----------------------------------------------------------------------------
4252 // Exceptions
4253 
4254 void TemplateTable::athrow() {
4255   transition(atos, vtos);
4256   __ null_check(rax);
4257   __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
4258 }
4259 
4260 //-----------------------------------------------------------------------------
4261 // Synchronization
4262 //
4263 // Note: monitorenter & exit are symmetric routines; which is reflected
4264 //       in the assembly code structure as well
4265 //
4266 // Stack layout:
4267 //
4268 // [expressions  ] <--- rsp               = expression stack top
4269 // ..
4270 // [expressions  ]
4271 // [monitor entry] <--- monitor block top = expression stack bot
4272 // ..
4273 // [monitor entry]
4274 // [frame data   ] <--- monitor block bot
4275 // ...
4276 // [saved rbp    ] <--- rbp
4277 void TemplateTable::monitorenter() {
4278   transition(atos, vtos);
4279 
4280   // check for NULL object
4281   __ null_check(rax);
4282 
4283   const Address monitor_block_top(
4284         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4285   const Address monitor_block_bot(
4286         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4287   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4288 
4289   Label allocated;
4290 
4291   Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
4292   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4293   Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4294 
4295   // initialize entry pointer
4296   __ xorl(rmon, rmon); // points to free slot or NULL
4297 
4298   // find a free slot in the monitor block (result in rmon)
4299   {
4300     Label entry, loop, exit;
4301     __ movptr(rtop, monitor_block_top); // points to current entry,
4302                                         // starting with top-most entry
4303     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4304                                         // of monitor block
4305     __ jmpb(entry);
4306 
4307     __ bind(loop);
4308     // check if current entry is used
4309     __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
4310     // if not used then remember entry in rmon
4311     __ cmovptr(Assembler::equal, rmon, rtop);   // cmov => cmovptr
4312     // check if current entry is for same object
4313     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4314     // if same object then stop searching
4315     __ jccb(Assembler::equal, exit);
4316     // otherwise advance to next entry
4317     __ addptr(rtop, entry_size);
4318     __ bind(entry);
4319     // check if bottom reached
4320     __ cmpptr(rtop, rbot);
4321     // if not at bottom then check this entry
4322     __ jcc(Assembler::notEqual, loop);
4323     __ bind(exit);
4324   }
4325 
4326   __ testptr(rmon, rmon); // check if a slot has been found
4327   __ jcc(Assembler::notZero, allocated); // if found, continue with that one
4328 
4329   // allocate one if there's no free slot
4330   {
4331     Label entry, loop;
4332     // 1. compute new pointers          // rsp: old expression stack top
4333     __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
4334     __ subptr(rsp, entry_size);         // move expression stack top
4335     __ subptr(rmon, entry_size);        // move expression stack bottom
4336     __ mov(rtop, rsp);                  // set start value for copy loop
4337     __ movptr(monitor_block_bot, rmon); // set new monitor block bottom
4338     __ jmp(entry);
4339     // 2. move expression stack contents
4340     __ bind(loop);
4341     __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
4342                                                 // word from old location
4343     __ movptr(Address(rtop, 0), rbot);          // and store it at new location
4344     __ addptr(rtop, wordSize);                  // advance to next word
4345     __ bind(entry);
4346     __ cmpptr(rtop, rmon);                      // check if bottom reached
4347     __ jcc(Assembler::notEqual, loop);          // if not at bottom then
4348                                                 // copy next word
4349   }
4350 
4351   // call run-time routine
4352   // rmon: points to monitor entry
4353   __ bind(allocated);
4354 
4355   // Increment bcp to point to the next bytecode, so exception
4356   // handling for async exceptions works correctly.
4357   // The object has already been popped from the stack, so the
4358   // expression stack looks correct.
4359   __ increment(rbcp);
4360 
4361   // store object
4362   __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
4363   __ lock_object(rmon);
4364 
4365   // check to make sure this monitor doesn't cause stack overflow after locking
4366   __ save_bcp();  // in case of exception
4367   __ generate_stack_overflow_check(0);
4368 
4369   // The bcp has already been incremented. Just need to dispatch to
4370   // next instruction.
4371   __ dispatch_next(vtos);
4372 }
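     // Editor's sketch (plain-C++ rendering of the free-slot search above;
     // illustrative, not a drop-in replacement for the generated code;
     // next_entry is a stand-in for advancing by entry_size):
     //
     //   BasicObjectLock* free_slot = NULL;
     //   for (BasicObjectLock* e = top; e != bot; e = next_entry(e)) {
     //     if (e->obj() == NULL) free_slot = e;  // remember a free slot
     //     if (e->obj() == obj)  break;          // same object: stop searching
     //   }
     //   if (free_slot == NULL) { /* grow the monitor block by one entry */ }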
4373 
4374 void TemplateTable::monitorexit() {
4375   transition(atos, vtos);
4376 
4377   // check for NULL object
4378   __ null_check(rax);
4379 
4380   const Address monitor_block_top(
4381         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4382   const Address monitor_block_bot(
4383         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4384   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4385 
4386   Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4387   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4388 
4389   Label found;
4390 
4391   // find matching slot
4392   {
4393     Label entry, loop;
4394     __ movptr(rtop, monitor_block_top); // points to current entry,
4395                                         // starting with top-most entry
4396     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4397                                         // of monitor block
4398     __ jmpb(entry);
4399 
4400     __ bind(loop);
4401     // check if current entry is for same object
4402     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4403     // if same object then stop searching
4404     __ jcc(Assembler::equal, found);
4405     // otherwise advance to next entry
4406     __ addptr(rtop, entry_size);
4407     __ bind(entry);
4408     // check if bottom reached
4409     __ cmpptr(rtop, rbot);
4410     // if not at bottom then check this entry
4411     __ jcc(Assembler::notEqual, loop);
4412   }
4413 
4414   // error handling. Unlocking was not block-structured
4415   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4416                    InterpreterRuntime::throw_illegal_monitor_state_exception));
4417   __ should_not_reach_here();
4418 
4419   // call run-time routine
4420   __ bind(found);
4421   __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
4422   __ unlock_object(rtop);
4423   __ pop_ptr(rax); // discard object
4424 }
4425 
4426 // Wide instructions
4427 void TemplateTable::wide() {
4428   transition(vtos, vtos);
4429   __ load_unsigned_byte(rbx, at_bcp(1));
4430   ExternalAddress wtable((address)Interpreter::_wentry_point);
4431   __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
4432   // Note: the rbcp increment step is part of the individual wide bytecode implementations
4433 }
4434 
4435 // Multi arrays
4436 void TemplateTable::multianewarray() {
4437   transition(vtos, atos);
4438 
4439   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4440   __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
4441   // last dim is on top of stack; we want address of first one:
4442   // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordSize
4443   // the latter wordSize to point to the beginning of the array.
4444   __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
4445   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
4446   __ load_unsigned_byte(rbx, at_bcp(3));
4447   __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
4448 }
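       // Editor's note: with the dimension counts on the expression stack
       // (last one on top) and stackElementSize equal to wordSize on x86,
       // the first lea above computes rsp + ndims*wordSize - wordSize, the
       // address of the first count (the first_addr formula), and the final
       // lea pops all ndims counts at once.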
--- EOF ---