/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Global Register Names
static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
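// rbcp caches the interpreter's bytecode pointer (BCP) and rlocals the base
// of the current method's locals area across all bytecode templates.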

// Address Computation: local variables
static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

#ifndef _LP64
static inline Address haddress(int n) {
  return iaddress(n + 0);
}
#endif

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::times_ptr);
}

static inline Address laddress(Register r) {
  return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
}

#ifndef _LP64
static inline Address haddress(Register r)       {
  return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}
#endif

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}


// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp   () {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from
// rsp().  It isn't different for category 1 values.
static inline Address at_tos   () {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
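// j_not(cc) returns the x86 condition that is true exactly when the bytecode
// condition cc is false; the branch templates use it to jump to the
// not-taken path.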
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}



// Miscellaneous helper routines
// Store an oop (or null) at the address described by dst.
// If val == noreg this means store a null


static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators = 0) {
  assert(val == noreg || val == rax, "parameter is just for looks");
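  // The three extra registers passed to store_heap_oop are temporaries for
  // the GC barrier code; which of them are actually used depends on the
  // BarrierSetAssembler of the collector in use.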
  __ store_heap_oop(dst, val,
                    NOT_LP64(rdx) LP64_ONLY(rscratch2),
                    NOT_LP64(rbx) LP64_ONLY(r9),
                    NOT_LP64(rsi) LP64_ONLY(r8), decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators = 0) {
  __ load_heap_oop(dst, src, rdx, rbx, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ load_field_entry(temp_reg, bc_reg);
      if (byte_no == f1_byte) {
        __ load_unsigned_byte(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset())));
      } else {
        __ load_unsigned_byte(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset())));
      }

      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
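    // In debug builds the verification code below pushes L_patch_done beyond
    // the 8-bit range of jmpb, so a long jmp is needed.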
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
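  // The single byte store below is the commit point: concurrent threads
  // executing this bytecode see either the old or the quickened opcode,
  // both of which are valid here.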
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}
// Individual instructions


void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
#ifndef _LP64
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
#endif
}



void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  if (UseSSE >= 1) {
    static float one = 1.0f, two = 2.0f;
    switch (value) {
    case 0:
      __ xorps(xmm0, xmm0);
      break;
    case 1:
      __ movflt(xmm0, ExternalAddress((address) &one), rscratch1);
      break;
    case 2:
      __ movflt(xmm0, ExternalAddress((address) &two), rscratch1);
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
           if (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
    } else                 { ShouldNotReachHere();
    }
#endif // _LP64
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  if (UseSSE >= 2) {
    static double one = 1.0;
    switch (value) {
    case 0:
      __ xorpd(xmm0, xmm0);
      break;
    case 1:
      __ movdbl(xmm0, ExternalAddress((address) &one), rscratch1);
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
           if (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else                 { ShouldNotReachHere();
    }
#endif
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
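  // The operand is a big-endian s2 at BCP+1: the 16-bit load reads it
  // byte-swapped, bswapl moves it into the high half of rax, and the
  // arithmetic shift brings it back down with sign extension.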
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}

void TemplateTable::ldc(LdcType type) {
  transition(vtos, vtos);
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (is_ldc_wide(type)) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);

  __ movl(rarg, is_ldc_wide(type) ? 1 : 0);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);

  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);

  // ftos
  __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jccb(Assembler::notEqual, notInt);

  // itos
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ jmp(Done);

  // assume the tag is for condy; if not, the VM runtime will tell us
  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(LdcType type) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testptr(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);
  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;
    ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
    __ movptr(tmp, null_sentinel);
    __ resolve_oop_handle(tmp, rscratch2);
    __ cmpoop(tmp, result);
    __ jccb(Assembler::notEqual, notNull);
    __ xorptr(result, result);  // null object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
  __ cmpl(rdx, JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, notDouble);

  // dtos
  __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);

  __ jmp(Done);
  __ bind(notDouble);
  __ cmpl(rdx, JVM_CONSTANT_Long);
  __ jccb(Assembler::notEqual, notLong);

  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
  __ push(ltos);
  __ jmp(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done) {
  const Register obj = rax;
  const Register off = rbx;
  const Register flags = rcx;
  const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  __ movl(rarg, (int)bytecode());
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(flags, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(flags, r15_thread);
#endif
  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ movl(off, flags);
  __ andl(off, ConstantPoolCache::field_index_mask);
  const Address field(obj, off, Address::times_1, 0*wordSize);

  // What sort of thing are we loading?
  __ shrl(flags, ConstantPoolCache::tos_state_shift);
  __ andl(flags, ConstantPoolCache::tos_state_mask);

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpl(flags, itos);
      __ jccb(Assembler::notEqual, notInt);
      // itos
      __ movl(rax, field);
      __ push(itos);
      __ jmp(Done);

      __ bind(notInt);
      __ cmpl(flags, ftos);
      __ jccb(Assembler::notEqual, notFloat);
      // ftos
      __ load_float(field);
      __ push(ftos);
      __ jmp(Done);

      __ bind(notFloat);
      __ cmpl(flags, stos);
      __ jccb(Assembler::notEqual, notShort);
      // stos
      __ load_signed_short(rax, field);
      __ push(stos);
      __ jmp(Done);

      __ bind(notShort);
      __ cmpl(flags, btos);
      __ jccb(Assembler::notEqual, notByte);
      // btos
      __ load_signed_byte(rax, field);
      __ push(btos);
      __ jmp(Done);

      __ bind(notByte);
      __ cmpl(flags, ctos);
      __ jccb(Assembler::notEqual, notChar);
      // ctos
      __ load_unsigned_short(rax, field);
      __ push(ctos);
      __ jmp(Done);

      __ bind(notChar);
      __ cmpl(flags, ztos);
      __ jccb(Assembler::notEqual, notBool);
      // ztos
      __ load_signed_byte(rax, field);
      __ push(ztos);
      __ jmp(Done);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmpl(flags, ltos);
      __ jccb(Assembler::notEqual, notLong);
      // ltos
      // Loading high word first because movptr clobbers rax
      NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
      __ movptr(rax, field);
      __ push(ltos);
      __ jmp(Done);

      __ bind(notLong);
      __ cmpl(flags, dtos);
      __ jccb(Assembler::notEqual, notDouble);
      // dtos
      __ load_double(field);
      __ push(dtos);
      __ jmp(Done);

      __ bind(notDouble);
      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
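  // Locals are addressed at rlocals and below (the locals area grows toward
  // lower addresses), so the index is negated and used as a scaled offset;
  // see iaddress(Register).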
  __ negptr(reg);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to _fast_iload2.  We only want to rewrite
    // the last two iloads in a pair.  Comparing against _fast_iload means
    // that the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);

    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
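  // The wide index is a big-endian u2 at BCP+2: bswap the loaded halfword
  // into the high bits and shift back down logically (the index is
  // unsigned), then negate it for the downward-growing locals area.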
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  Label skip;
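  // Unsigned 'below' comparison: a negative index appears as a very large
  // unsigned value, so the fall-through throw path handles both index < 0
  // and index >= length.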
  __ jccb(Assembler::below, skip);
  // Pass array to create more detailed exceptions.
  __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
  __ jump(RuntimeAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
  __ bind(skip);
}

void TemplateTable::iaload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_INT)),
                    noreg, noreg);
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  NOT_LP64(__ mov(rbx, rax));
  // rbx: index
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
                    Address(rdx, rbx, Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_LONG)),
                    noreg, noreg);
}



void TemplateTable::faload() {
  transition(itos, ftos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
                    Address(rdx, rax,
                            Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                    noreg, noreg);
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
                    Address(rdx, rax,
                            Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                    noreg, noreg);
}

void TemplateTable::aaload() {
  transition(itos, atos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  do_oop_load(_masm,
              Address(rdx, rax,
                      UseCompressedOops ? Address::times_4 : Address::times_ptr,
                      arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
              rax,
              IS_ARRAY);
}

void TemplateTable::baload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                    noreg, noreg);
}

void TemplateTable::caload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg, noreg);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
                    noreg, noreg);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ load_float(faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ load_double(daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ store_float(faddress(rbx));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ store_double(daddress(rbx));
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  NOT_LP64(__ pop_l(rax, rdx));
  LP64_ONLY(__ pop_l());
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}

void TemplateTable::wide_fstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_f(xmm0);
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
#else
  wide_istore();
#endif
}

void TemplateTable::wide_dstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_d(xmm0);
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
#else
  wide_lstore();
#endif
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_INT)),
                     rax, noreg, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx);  // prefer index in rbx
  // rbx: index
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
                     Address(rcx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
                     noreg /* ltos */, noreg, noreg, noreg);
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 1 ? xmm0 : ST(0)
  // rbx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                     noreg /* ftos */, noreg, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 2 ? xmm0 : ST(0)
  // rbx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                     noreg /* dtos */, noreg, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1()); // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops? Address::times_4 : Address::times_ptr,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));
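  // With compressed oops an element is a narrow (4-byte) oop, hence times_4;
  // otherwise elements are pointer-sized.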

  index_check_without_pop(rdx, rcx);     // kills rbx
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax, rscratch1);
  // Move superklass into rax
  __ load_klass(rax, rdx, rscratch1);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(RuntimeAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  __ movl(rcx, at_tos_p1()); // index
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, rax, IS_ARRAY);
  __ jmp(done);
  // Have a null in rax, rdx=array, rcx=index.  Store null at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a null
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(rcx, rdx, rscratch1);
  __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ testl(rcx, diffbit);
  Label L_skip;
  __ jccb(Assembler::zero, L_skip);
  __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_1,
                             arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                     rax, noreg, noreg, noreg);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_2,
                             arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                     rax, noreg, noreg, noreg);
}


void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ store_float(faddress(n));
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ store_double(daddress(n));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
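  // Note: the variable-count shift instructions used below take their count
  // in cl, which is why the shift cases move the count into rcx before
  // popping the value.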
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
#ifdef _LP64
  switch (op) {
  case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
#else
  __ pop_l(rbx, rcx);
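  // 32-bit: a long is split across rdx:rax (high:low) and the second operand
  // is popped into rcx:rbx; add/adc and sub/sbb propagate the carry between
  // the two halves.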
  switch (op) {
    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
                __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default   : ShouldNotReachHere();
  }
#endif
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
#ifdef _LP64
  __ pop_l(rdx);
  __ imulq(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
#endif
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             RuntimeAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             RuntimeAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
#endif
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             RuntimeAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             RuntimeAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
#endif
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);                             // get shift count
#ifdef _LP64
  __ pop_l(rax);                                 // get shift value
  __ shlq(rax);
#else
  __ pop_l(rax, rdx);                            // get shift value
  __ lshl(rdx, rax);
#endif
}

void TemplateTable::lshr() {
#ifdef _LP64
  transition(itos, ltos);
  __ movl(rcx, rax);                             // get shift count
  __ pop_l(rax);                                 // get shift value
  __ sarq(rax);
#else
  transition(itos, ltos);
  __ mov(rcx, rax);                              // get shift count
  __ pop_l(rax, rdx);                            // get shift value
  __ lshr(rdx, rax, true);
#endif
}

void TemplateTable::lushr() {
  transition(itos, ltos);
#ifdef _LP64
  __ movl(rcx, rax);                             // get shift count
  __ pop_l(rax);                                 // get shift value
  __ shrq(rax);
#else
  __ mov(rcx, rax);                              // get shift count
  __ pop_l(rax, rdx);                            // get shift value
  __ lshr(rdx, rax);
#endif
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  if (UseSSE >= 1) {
    switch (op) {
    case add:
      __ addss(xmm0, at_rsp());
      __ addptr(rsp, Interpreter::stackElementSize);
      break;
    case sub:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ subss(xmm0, xmm1);
      break;
    case mul:
      __ mulss(xmm0, at_rsp());
      __ addptr(rsp, Interpreter::stackElementSize);
      break;
    case div:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ divss(xmm0, xmm1);
      break;
    case rem:
      // On x86_64 platforms the SharedRuntime::frem method is called to perform the
      // modulo operation. The frem method calls the function
      // double fmod(double x, double y) in math.h. The documentation of fmod states:
      // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
      // (signalling or quiet) is returned.
      //
      // On x86_32 platforms the FPU is used to perform the modulo operation. The
      // reason is that on 32-bit Windows the sign of modulo operations diverges from
      // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f, not -0.0f).
      // The fprem instruction used on x86_32 is functionally equivalent to
      // SharedRuntime::frem in that it returns a NaN.
#ifdef _LP64
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
#else // !_LP64
      __ push_f(xmm0);
      __ pop_f();
      __ fld_s(at_rsp());
      __ fremr(rax);
      __ f2ieee();
      __ pop(rax);  // pop second operand off the stack
      __ push_f();
      __ pop_f(xmm0);
#endif // _LP64
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else // !_LP64
    switch (op) {
    case add: __ fadd_s (at_rsp());                break;
    case sub: __ fsubr_s(at_rsp());                break;
    case mul: __ fmul_s (at_rsp());                break;
    case div: __ fdivr_s(at_rsp());                break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
    }
    __ f2ieee();
    __ pop(rax);  // pop second operand off the stack
#endif // _LP64
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  if (UseSSE >= 2) {
    switch (op) {
    case add:
      __ addsd(xmm0, at_rsp());
      __ addptr(rsp, 2 * Interpreter::stackElementSize);
      break;
    case sub:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ subsd(xmm0, xmm1);
      break;
    case mul:
      __ mulsd(xmm0, at_rsp());
      __ addptr(rsp, 2 * Interpreter::stackElementSize);
      break;
    case div:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ divsd(xmm0, xmm1);
      break;
    case rem:
      // Similar to fop2(), the modulo operation is performed using the
      // SharedRuntime::drem method (on x86_64 platforms) or using the
      // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
#ifdef _LP64
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
#else // !_LP64
      __ push_d(xmm0);
      __ pop_d();
      __ fld_d(at_rsp());
      __ fremr(rax);
      __ d2ieee();
      __ pop(rax);
      __ pop(rdx);
      __ push_d();
      __ pop_d(xmm0);
#endif // _LP64
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else // !_LP64
    switch (op) {
    case add: __ fadd_d (at_rsp());                break;
    case sub: __ fsubr_d(at_rsp());                break;
    case mul: {
      // strict semantics
      __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d (at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
      __ fmulp();
      break;
    }
    case div: {
      // strict semantics
      __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1()));
      __ fmul_d (at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2()));
      __ fmulp();
      break;
    }
    case rem: __ fld_d  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
    }
    __ d2ieee();
    // Pop double precision number from rsp.
    __ pop(rax);
    __ pop(rdx);
#endif // _LP64
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  LP64_ONLY(__ negq(rax));
  NOT_LP64(__ lneg(rdx, rax));
}

1684 // Note: 'double' and 'long long' have 32-bits alignment on x86.
1685 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1686   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
1687   // of 128-bits operands for SSE instructions.
1688   jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1689   // Store the value to a 128-bits operand.
1690   operand[0] = lo;
1691   operand[1] = hi;
1692   return operand;
1693 }
1694 
// Buffers for the 128-bit masks used by SSE instructions.
1696 static jlong float_signflip_pool[2*2];
1697 static jlong double_signflip_pool[2*2];
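// Each pool is 32 bytes (2*2 jlongs) so that double_quadword(&pool[1], ...)
// always finds a 16-byte aligned 16-byte window inside it: rounding
// &pool[1] down to a 16-byte boundary backs up by at most 8 bytes, which
// still leaves the full 128-bit operand within the pool.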
1698 
1699 void TemplateTable::fneg() {
1700   transition(ftos, ftos);
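  // Negation is a pure sign-bit flip in IEEE-754: the SSE path XORs the
  // value with a sign-bits-only mask and the FPU path uses fchs, both of
  // which are also correct for NaN, infinity, and -0.0f.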
1701   if (UseSSE >= 1) {
1702     static jlong *float_signflip  = double_quadword(&float_signflip_pool[1],  CONST64(0x8000000080000000),  CONST64(0x8000000080000000));
1703     __ xorps(xmm0, ExternalAddress((address) float_signflip), rscratch1);
1704   } else {
1705     LP64_ONLY(ShouldNotReachHere());
1706     NOT_LP64(__ fchs());
1707   }
1708 }
1709 
1710 void TemplateTable::dneg() {
1711   transition(dtos, dtos);
1712   if (UseSSE >= 2) {
1713     static jlong *double_signflip =
1714       double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
1715     __ xorpd(xmm0, ExternalAddress((address) double_signflip), rscratch1);
1716   } else {
1717 #ifdef _LP64
1718     ShouldNotReachHere();
1719 #else
1720     __ fchs();
1721 #endif
1722   }
1723 }
1724 
1725 void TemplateTable::iinc() {
1726   transition(vtos, vtos);
1727   __ load_signed_byte(rdx, at_bcp(2)); // get constant
1728   locals_index(rbx);
1729   __ addl(iaddress(rbx), rdx);
1730 }
1731 
1732 void TemplateTable::wide_iinc() {
1733   transition(vtos, vtos);
1734   __ movl(rdx, at_bcp(4)); // get constant
1735   locals_index_wide(rbx);
1736   __ bswapl(rdx); // swap bytes & sign-extend constant
1737   __ sarl(rdx, 16);
1738   __ addl(iaddress(rbx), rdx);
1739   // Note: should probably use only one movl to get both
1740   //       the index and the constant -> fix this
1741 }
1742 
1743 void TemplateTable::convert() {
1744 #ifdef _LP64
1745   // Checking
1746 #ifdef ASSERT
1747   {
1748     TosState tos_in  = ilgl;
1749     TosState tos_out = ilgl;
1750     switch (bytecode()) {
1751     case Bytecodes::_i2l: // fall through
1752     case Bytecodes::_i2f: // fall through
1753     case Bytecodes::_i2d: // fall through
1754     case Bytecodes::_i2b: // fall through
1755     case Bytecodes::_i2c: // fall through
1756     case Bytecodes::_i2s: tos_in = itos; break;
1757     case Bytecodes::_l2i: // fall through
1758     case Bytecodes::_l2f: // fall through
1759     case Bytecodes::_l2d: tos_in = ltos; break;
1760     case Bytecodes::_f2i: // fall through
1761     case Bytecodes::_f2l: // fall through
1762     case Bytecodes::_f2d: tos_in = ftos; break;
1763     case Bytecodes::_d2i: // fall through
1764     case Bytecodes::_d2l: // fall through
1765     case Bytecodes::_d2f: tos_in = dtos; break;
1766     default             : ShouldNotReachHere();
1767     }
1768     switch (bytecode()) {
1769     case Bytecodes::_l2i: // fall through
1770     case Bytecodes::_f2i: // fall through
1771     case Bytecodes::_d2i: // fall through
1772     case Bytecodes::_i2b: // fall through
1773     case Bytecodes::_i2c: // fall through
1774     case Bytecodes::_i2s: tos_out = itos; break;
1775     case Bytecodes::_i2l: // fall through
1776     case Bytecodes::_f2l: // fall through
1777     case Bytecodes::_d2l: tos_out = ltos; break;
1778     case Bytecodes::_i2f: // fall through
1779     case Bytecodes::_l2f: // fall through
1780     case Bytecodes::_d2f: tos_out = ftos; break;
1781     case Bytecodes::_i2d: // fall through
1782     case Bytecodes::_l2d: // fall through
1783     case Bytecodes::_f2d: tos_out = dtos; break;
1784     default             : ShouldNotReachHere();
1785     }
1786     transition(tos_in, tos_out);
1787   }
1788 #endif // ASSERT
1789 
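  // cvttss2siq/cvttsd2siq produce this "integer indefinite" value
  // (min_jlong) for NaN inputs and for results that overflow a 64-bit
  // long; the cmp64 checks below use it to detect when the slow path
  // (SharedRuntime::f2l / d2l) is needed.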
1790   static const int64_t is_nan = 0x8000000000000000L;
1791 
1792   // Conversion
1793   switch (bytecode()) {
1794   case Bytecodes::_i2l:
1795     __ movslq(rax, rax);
1796     break;
1797   case Bytecodes::_i2f:
1798     __ cvtsi2ssl(xmm0, rax);
1799     break;
1800   case Bytecodes::_i2d:
1801     __ cvtsi2sdl(xmm0, rax);
1802     break;
1803   case Bytecodes::_i2b:
1804     __ movsbl(rax, rax);
1805     break;
1806   case Bytecodes::_i2c:
1807     __ movzwl(rax, rax);
1808     break;
1809   case Bytecodes::_i2s:
1810     __ movswl(rax, rax);
1811     break;
1812   case Bytecodes::_l2i:
1813     __ movl(rax, rax);
1814     break;
1815   case Bytecodes::_l2f:
1816     __ cvtsi2ssq(xmm0, rax);
1817     break;
1818   case Bytecodes::_l2d:
1819     __ cvtsi2sdq(xmm0, rax);
1820     break;
1821   case Bytecodes::_f2i:
1822   {
1823     Label L;
1824     __ cvttss2sil(rax, xmm0);
1825     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1826     __ jcc(Assembler::notEqual, L);
1827     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1828     __ bind(L);
1829   }
1830     break;
1831   case Bytecodes::_f2l:
1832   {
1833     Label L;
1834     __ cvttss2siq(rax, xmm0);
1835     // NaN or overflow/underflow?
1836     __ cmp64(rax, ExternalAddress((address) &is_nan), rscratch1);
1837     __ jcc(Assembler::notEqual, L);
1838     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1839     __ bind(L);
1840   }
1841     break;
1842   case Bytecodes::_f2d:
1843     __ cvtss2sd(xmm0, xmm0);
1844     break;
1845   case Bytecodes::_d2i:
1846   {
1847     Label L;
1848     __ cvttsd2sil(rax, xmm0);
1849     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1850     __ jcc(Assembler::notEqual, L);
1851     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1852     __ bind(L);
1853   }
1854     break;
1855   case Bytecodes::_d2l:
1856   {
1857     Label L;
1858     __ cvttsd2siq(rax, xmm0);
1859     // NaN or overflow/underflow?
1860     __ cmp64(rax, ExternalAddress((address) &is_nan), rscratch1);
1861     __ jcc(Assembler::notEqual, L);
1862     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1863     __ bind(L);
1864   }
1865     break;
1866   case Bytecodes::_d2f:
1867     __ cvtsd2ss(xmm0, xmm0);
1868     break;
1869   default:
1870     ShouldNotReachHere();
1871   }
1872 #else // !_LP64
1873   // Checking
1874 #ifdef ASSERT
1875   { TosState tos_in  = ilgl;
1876     TosState tos_out = ilgl;
1877     switch (bytecode()) {
1878       case Bytecodes::_i2l: // fall through
1879       case Bytecodes::_i2f: // fall through
1880       case Bytecodes::_i2d: // fall through
1881       case Bytecodes::_i2b: // fall through
1882       case Bytecodes::_i2c: // fall through
1883       case Bytecodes::_i2s: tos_in = itos; break;
1884       case Bytecodes::_l2i: // fall through
1885       case Bytecodes::_l2f: // fall through
1886       case Bytecodes::_l2d: tos_in = ltos; break;
1887       case Bytecodes::_f2i: // fall through
1888       case Bytecodes::_f2l: // fall through
1889       case Bytecodes::_f2d: tos_in = ftos; break;
1890       case Bytecodes::_d2i: // fall through
1891       case Bytecodes::_d2l: // fall through
1892       case Bytecodes::_d2f: tos_in = dtos; break;
1893       default             : ShouldNotReachHere();
1894     }
1895     switch (bytecode()) {
1896       case Bytecodes::_l2i: // fall through
1897       case Bytecodes::_f2i: // fall through
1898       case Bytecodes::_d2i: // fall through
1899       case Bytecodes::_i2b: // fall through
1900       case Bytecodes::_i2c: // fall through
1901       case Bytecodes::_i2s: tos_out = itos; break;
1902       case Bytecodes::_i2l: // fall through
1903       case Bytecodes::_f2l: // fall through
1904       case Bytecodes::_d2l: tos_out = ltos; break;
1905       case Bytecodes::_i2f: // fall through
1906       case Bytecodes::_l2f: // fall through
1907       case Bytecodes::_d2f: tos_out = ftos; break;
1908       case Bytecodes::_i2d: // fall through
1909       case Bytecodes::_l2d: // fall through
1910       case Bytecodes::_f2d: tos_out = dtos; break;
1911       default             : ShouldNotReachHere();
1912     }
1913     transition(tos_in, tos_out);
1914   }
1915 #endif // ASSERT
1916 
1917   // Conversion
1918   // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1919   switch (bytecode()) {
1920     case Bytecodes::_i2l:
1921       __ extend_sign(rdx, rax);
1922       break;
1923     case Bytecodes::_i2f:
1924       if (UseSSE >= 1) {
1925         __ cvtsi2ssl(xmm0, rax);
1926       } else {
1927         __ push(rax);          // store int on tos
1928         __ fild_s(at_rsp());   // load int to ST0
1929         __ f2ieee();           // truncate to float size
1930         __ pop(rcx);           // adjust rsp
1931       }
1932       break;
1933     case Bytecodes::_i2d:
1934       if (UseSSE >= 2) {
1935         __ cvtsi2sdl(xmm0, rax);
1936       } else {
        __ push(rax);          // add one slot for d2ieee()
        __ push(rax);          // store int on tos
        __ fild_s(at_rsp());   // load int to ST0
        __ d2ieee();           // truncate to double size
        __ pop(rcx);           // adjust rsp
        __ pop(rcx);
1943       }
1944       break;
1945     case Bytecodes::_i2b:
1946       __ shll(rax, 24);      // truncate upper 24 bits
1947       __ sarl(rax, 24);      // and sign-extend byte
1948       LP64_ONLY(__ movsbl(rax, rax));
1949       break;
1950     case Bytecodes::_i2c:
1951       __ andl(rax, 0xFFFF);  // truncate upper 16 bits
1952       LP64_ONLY(__ movzwl(rax, rax));
1953       break;
1954     case Bytecodes::_i2s:
1955       __ shll(rax, 16);      // truncate upper 16 bits
1956       __ sarl(rax, 16);      // and sign-extend short
1957       LP64_ONLY(__ movswl(rax, rax));
1958       break;
1959     case Bytecodes::_l2i:
1960       /* nothing to do */
1961       break;
1962     case Bytecodes::_l2f:
1963       // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
1964       // 64-bit long values to floats. On 32-bit platforms it is not possible
1965       // to use that instruction with 64-bit operands, therefore the FPU is
1966       // used to perform the conversion.
1967       __ push(rdx);          // store long on tos
1968       __ push(rax);
1969       __ fild_d(at_rsp());   // load long to ST0
1970       __ f2ieee();           // truncate to float size
1971       __ pop(rcx);           // adjust rsp
1972       __ pop(rcx);
1973       if (UseSSE >= 1) {
1974         __ push_f();
1975         __ pop_f(xmm0);
1976       }
1977       break;
1978     case Bytecodes::_l2d:
      // On 32-bit platforms the FPU is used for the conversion because
      // the cvtsi2sdq instruction cannot be used with 64-bit operands
      // there.
1982       __ push(rdx);          // store long on tos
1983       __ push(rax);
1984       __ fild_d(at_rsp());   // load long to ST0
1985       __ d2ieee();           // truncate to double size
1986       __ pop(rcx);           // adjust rsp
1987       __ pop(rcx);
1988       if (UseSSE >= 2) {
1989         __ push_d();
1990         __ pop_d(xmm0);
1991       }
1992       break;
1993     case Bytecodes::_f2i:
1994       // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
1995       // as it returns 0 for any NaN.
1996       if (UseSSE >= 1) {
1997         __ push_f(xmm0);
1998       } else {
1999         __ push(rcx);          // reserve space for argument
2000         __ fstp_s(at_rsp());   // pass float argument on stack
2001       }
2002       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
2003       break;
2004     case Bytecodes::_f2l:
2005       // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
2006       // as it returns 0 for any NaN.
2007       if (UseSSE >= 1) {
        __ push_f(xmm0);
2009       } else {
2010         __ push(rcx);          // reserve space for argument
2011         __ fstp_s(at_rsp());   // pass float argument on stack
2012       }
2013       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2014       break;
2015     case Bytecodes::_f2d:
2016       if (UseSSE < 1) {
2017         /* nothing to do */
2018       } else if (UseSSE == 1) {
2019         __ push_f(xmm0);
2020         __ pop_f();
2021       } else { // UseSSE >= 2
2022         __ cvtss2sd(xmm0, xmm0);
2023       }
2024       break;
2025     case Bytecodes::_d2i:
2026       if (UseSSE >= 2) {
2027         __ push_d(xmm0);
2028       } else {
2029         __ push(rcx);          // reserve space for argument
2030         __ push(rcx);
2031         __ fstp_d(at_rsp());   // pass double argument on stack
2032       }
2033       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
2034       break;
2035     case Bytecodes::_d2l:
2036       if (UseSSE >= 2) {
2037         __ push_d(xmm0);
2038       } else {
2039         __ push(rcx);          // reserve space for argument
2040         __ push(rcx);
2041         __ fstp_d(at_rsp());   // pass double argument on stack
2042       }
2043       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
2044       break;
2045     case Bytecodes::_d2f:
2046       if (UseSSE <= 1) {
2047         __ push(rcx);          // reserve space for f2ieee()
2048         __ f2ieee();           // truncate to float size
2049         __ pop(rcx);           // adjust rsp
2050         if (UseSSE == 1) {
2051           // The cvtsd2ss instruction is not available if UseSSE==1, therefore
2052           // the conversion is performed using the FPU in this case.
2053           __ push_f();
2054           __ pop_f(xmm0);
2055         }
2056       } else { // UseSSE >= 2
2057         __ cvtsd2ss(xmm0, xmm0);
2058       }
2059       break;
2060     default             :
2061       ShouldNotReachHere();
2062   }
2063 #endif // _LP64
2064 }
2065 
2066 void TemplateTable::lcmp() {
2067   transition(ltos, itos);
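  // JVMS lcmp semantics: push -1 if value1 < value2, 0 if equal, and 1
  // if greater, where value1 is popped from the stack and value2 is the
  // long in tosca.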
2068 #ifdef _LP64
2069   Label done;
2070   __ pop_l(rdx);
2071   __ cmpq(rdx, rax);
2072   __ movl(rax, -1);
2073   __ jccb(Assembler::less, done);
2074   __ setb(Assembler::notEqual, rax);
2075   __ movzbl(rax, rax);
2076   __ bind(done);
2077 #else
2078 
2079   // y = rdx:rax
2080   __ pop_l(rbx, rcx);             // get x = rcx:rbx
  __ lcmp2int(rcx, rbx, rdx, rax); // rcx := cmp(x, y)
2082   __ mov(rax, rcx);
2083 #endif
2084 }
2085 
2086 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2087   if ((is_float && UseSSE >= 1) ||
2088       (!is_float && UseSSE >= 2)) {
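    // unordered_result encodes the bytecode flavor: < 0 for fcmpl/dcmpl
    // (an unordered, i.e. NaN, comparison yields -1) and > 0 for
    // fcmpg/dcmpg (it yields +1).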
2089     Label done;
2090     if (is_float) {
2091       // XXX get rid of pop here, use ... reg, mem32
2092       __ pop_f(xmm1);
2093       __ ucomiss(xmm1, xmm0);
2094     } else {
2095       // XXX get rid of pop here, use ... reg, mem64
2096       __ pop_d(xmm1);
2097       __ ucomisd(xmm1, xmm0);
2098     }
2099     if (unordered_result < 0) {
2100       __ movl(rax, -1);
2101       __ jccb(Assembler::parity, done);
2102       __ jccb(Assembler::below, done);
2103       __ setb(Assembler::notEqual, rdx);
2104       __ movzbl(rax, rdx);
2105     } else {
2106       __ movl(rax, 1);
2107       __ jccb(Assembler::parity, done);
2108       __ jccb(Assembler::above, done);
2109       __ movl(rax, 0);
2110       __ jccb(Assembler::equal, done);
2111       __ decrementl(rax);
2112     }
2113     __ bind(done);
2114   } else {
2115 #ifdef _LP64
2116     ShouldNotReachHere();
2117 #else // !_LP64
2118     if (is_float) {
2119       __ fld_s(at_rsp());
2120     } else {
2121       __ fld_d(at_rsp());
2122       __ pop(rdx);
2123     }
2124     __ pop(rcx);
2125     __ fcmp2int(rax, unordered_result < 0);
2126 #endif // _LP64
2127   }
2128 }
2129 
2130 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2131   __ get_method(rcx); // rcx holds method
2132   __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2133                                      // holds bumped taken count
2134 
2135   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2136                              InvocationCounter::counter_offset();
2137   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2138                               InvocationCounter::counter_offset();
2139 
2140   // Load up edx with the branch displacement
2141   if (is_wide) {
2142     __ movl(rdx, at_bcp(1));
2143   } else {
2144     __ load_signed_short(rdx, at_bcp(1));
2145   }
2146   __ bswapl(rdx);
2147 
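  // The displacement is stored big-endian in the bytecode stream. After
  // bswapl, a short (non-wide) displacement sits in bits 31:16, so the
  // arithmetic shift below both repositions and sign-extends it.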
2148   if (!is_wide) {
2149     __ sarl(rdx, 16);
2150   }
2151   LP64_ONLY(__ movl2ptr(rdx, rdx));
2152 
2153   // Handle all the JSR stuff here, then exit.
2154   // It's much shorter and cleaner than intermingling with the non-JSR
2155   // normal-branch stuff occurring below.
2156   if (is_jsr) {
2157     // Pre-load the next target bytecode into rbx
2158     __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2159 
2160     // compute return address as bci in rax
2161     __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2162                         in_bytes(ConstMethod::codes_offset())));
2163     __ subptr(rax, Address(rcx, Method::const_offset()));
2164     // Adjust the bcp in r13 by the displacement in rdx
2165     __ addptr(rbcp, rdx);
2166     // jsr returns atos that is not an oop
2167     __ push_i(rax);
2168     __ dispatch_only(vtos, true);
2169     return;
2170   }
2171 
2172   // Normal (non-jsr) branch handling
2173 
2174   // Adjust the bcp in r13 by the displacement in rdx
2175   __ addptr(rbcp, rdx);
2176 
2177   assert(UseLoopCounter || !UseOnStackReplacement,
2178          "on-stack-replacement requires loop counters");
2179   Label backedge_counter_overflow;
2180   Label dispatch;
2181   if (UseLoopCounter) {
2182     // increment backedge counter for backward branches
2183     // rax: MDO
2184     // rbx: MDO bumped taken-count
2185     // rcx: method
2186     // rdx: target offset
2187     // r13: target bcp
2188     // r14: locals pointer
2189     __ testl(rdx, rdx);             // check if forward or backward branch
2190     __ jcc(Assembler::positive, dispatch); // count only if backward branch
2191 
2192     // check if MethodCounters exists
2193     Label has_counters;
2194     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2195     __ testptr(rax, rax);
2196     __ jcc(Assembler::notZero, has_counters);
2197     __ push(rdx);
2198     __ push(rcx);
2199     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2200                rcx);
2201     __ pop(rcx);
2202     __ pop(rdx);
2203     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2204     __ testptr(rax, rax);
2205     __ jcc(Assembler::zero, dispatch);
2206     __ bind(has_counters);
2207 
2208     Label no_mdo;
2209     if (ProfileInterpreter) {
2210       // Are we profiling?
2211       __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2212       __ testptr(rbx, rbx);
2213       __ jccb(Assembler::zero, no_mdo);
2214       // Increment the MDO backedge counter
2215       const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2216           in_bytes(InvocationCounter::counter_offset()));
2217       const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2218       __ increment_mask_and_jump(mdo_backedge_counter, mask, rax,
2219           UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
2220       __ jmp(dispatch);
2221     }
2222     __ bind(no_mdo);
2223     // Increment backedge counter in MethodCounters*
2224     __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2225     const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2226     __ increment_mask_and_jump(Address(rcx, be_offset), mask, rax,
2227         UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
2228     __ bind(dispatch);
2229   }
2230 
2231   // Pre-load the next target bytecode into rbx
2232   __ load_unsigned_byte(rbx, Address(rbcp, 0));
2233 
2234   // continue with the bytecode @ target
2235   // rax: return bci for jsr's, unused otherwise
2236   // rbx: target bytecode
2237   // r13: target bcp
2238   __ dispatch_only(vtos, true);
2239 
2240   if (UseLoopCounter) {
2241     if (UseOnStackReplacement) {
2242       Label set_mdp;
2243       // invocation counter overflow
2244       __ bind(backedge_counter_overflow);
2245       __ negptr(rdx);
2246       __ addptr(rdx, rbcp); // branch bcp
2247       // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2248       __ call_VM(noreg,
2249                  CAST_FROM_FN_PTR(address,
2250                                   InterpreterRuntime::frequency_counter_overflow),
2251                  rdx);
2252 
2253       // rax: osr nmethod (osr ok) or null (osr not possible)
2254       // rdx: scratch
2255       // r14: locals pointer
2256       // r13: bcp
2257       __ testptr(rax, rax);                        // test result
2258       __ jcc(Assembler::zero, dispatch);         // no osr if null
2259       // nmethod may have been invalidated (VM may block upon call_VM return)
2260       __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2261       __ jcc(Assembler::notEqual, dispatch);
2262 
2263       // We have the address of an on stack replacement routine in rax.
2264       // In preparation of invoking it, first we must migrate the locals
2265       // and monitors from off the interpreter frame on the stack.
2266       // Ensure to save the osr nmethod over the migration call,
2267       // it will be preserved in rbx.
2268       __ mov(rbx, rax);
2269 
2270       NOT_LP64(__ get_thread(rcx));
2271 
2272       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2273 
2274       // rax is OSR buffer, move it to expected parameter location
2275       LP64_ONLY(__ mov(j_rarg0, rax));
2276       NOT_LP64(__ mov(rcx, rax));
      // We use the j_rarg definitions here so that registers don't
      // conflict: parameter registers change across platforms, and we are
      // in the midst of a calling sequence to the OSR nmethod, so we don't
      // want collisions. These are NOT parameters.
2280 
2281       const Register retaddr   = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2282       const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2283 
2284       // pop the interpreter frame
2285       __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2286       __ leave();                                // remove frame anchor
2287       __ pop(retaddr);                           // get return address
2288       __ mov(rsp, sender_sp);                   // set sp to sender sp
2289       // Ensure compiled code always sees stack at proper alignment
2290       __ andptr(rsp, -(StackAlignmentInBytes));
2291 
      // No specialized return from compiled code back to the interpreter
      // or the call stub is needed here.
2294 
2295       // push the return address
2296       __ push(retaddr);
2297 
2298       // and begin the OSR nmethod
2299       __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
2300     }
2301   }
2302 }
2303 
2304 void TemplateTable::if_0cmp(Condition cc) {
2305   transition(itos, vtos);
2306   // assume branch is more often taken than not (loops use backward branches)
2307   Label not_taken;
2308   __ testl(rax, rax);
2309   __ jcc(j_not(cc), not_taken);
2310   branch(false, false);
2311   __ bind(not_taken);
2312   __ profile_not_taken_branch(rax);
2313 }
2314 
2315 void TemplateTable::if_icmp(Condition cc) {
2316   transition(itos, vtos);
2317   // assume branch is more often taken than not (loops use backward branches)
2318   Label not_taken;
2319   __ pop_i(rdx);
2320   __ cmpl(rdx, rax);
2321   __ jcc(j_not(cc), not_taken);
2322   branch(false, false);
2323   __ bind(not_taken);
2324   __ profile_not_taken_branch(rax);
2325 }
2326 
2327 void TemplateTable::if_nullcmp(Condition cc) {
2328   transition(atos, vtos);
2329   // assume branch is more often taken than not (loops use backward branches)
2330   Label not_taken;
2331   __ testptr(rax, rax);
2332   __ jcc(j_not(cc), not_taken);
2333   branch(false, false);
2334   __ bind(not_taken);
2335   __ profile_not_taken_branch(rax);
2336 }
2337 
2338 void TemplateTable::if_acmp(Condition cc) {
2339   transition(atos, vtos);
2340   // assume branch is more often taken than not (loops use backward branches)
2341   Label not_taken;
2342   __ pop_ptr(rdx);
2343   __ cmpoop(rdx, rax);
2344   __ jcc(j_not(cc), not_taken);
2345   branch(false, false);
2346   __ bind(not_taken);
2347   __ profile_not_taken_branch(rax);
2348 }
2349 
2350 void TemplateTable::ret() {
2351   transition(vtos, vtos);
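  // The local variable holds the bci saved by jsr; rebuild the bcp as
  // ConstMethod* + codes_offset + bci before dispatching.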
2352   locals_index(rbx);
2353   LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2354   NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2355   __ profile_ret(rbx, rcx);
2356   __ get_method(rax);
2357   __ movptr(rbcp, Address(rax, Method::const_offset()));
2358   __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2359                       ConstMethod::codes_offset()));
2360   __ dispatch_next(vtos, 0, true);
2361 }
2362 
2363 void TemplateTable::wide_ret() {
2364   transition(vtos, vtos);
2365   locals_index_wide(rbx);
2366   __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2367   __ profile_ret(rbx, rcx);
2368   __ get_method(rax);
2369   __ movptr(rbcp, Address(rax, Method::const_offset()));
2370   __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2371   __ dispatch_next(vtos, 0, true);
2372 }
2373 
2374 void TemplateTable::tableswitch() {
2375   Label default_case, continue_execution;
2376   transition(itos, vtos);
2377 
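  // tableswitch layout in the bytecode stream (after 4-byte alignment
  // padding): default offset, low, high, then (high - low + 1) 32-bit
  // jump offsets, all stored big-endian.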
2378   // align r13/rsi
2379   __ lea(rbx, at_bcp(BytesPerInt));
2380   __ andptr(rbx, -BytesPerInt);
2381   // load lo & hi
2382   __ movl(rcx, Address(rbx, BytesPerInt));
2383   __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2384   __ bswapl(rcx);
2385   __ bswapl(rdx);
2386   // check against lo & hi
2387   __ cmpl(rax, rcx);
2388   __ jcc(Assembler::less, default_case);
2389   __ cmpl(rax, rdx);
2390   __ jcc(Assembler::greater, default_case);
2391   // lookup dispatch offset
2392   __ subl(rax, rcx);
2393   __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2394   __ profile_switch_case(rax, rbx, rcx);
2395   // continue execution
2396   __ bind(continue_execution);
2397   __ bswapl(rdx);
2398   LP64_ONLY(__ movl2ptr(rdx, rdx));
2399   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2400   __ addptr(rbcp, rdx);
2401   __ dispatch_only(vtos, true);
2402   // handle default
2403   __ bind(default_case);
2404   __ profile_switch_default(rax);
2405   __ movl(rdx, Address(rbx, 0));
2406   __ jmp(continue_execution);
2407 }
2408 
2409 void TemplateTable::lookupswitch() {
2410   transition(itos, itos);
2411   __ stop("lookupswitch bytecode should have been rewritten");
2412 }
2413 
2414 void TemplateTable::fast_linearswitch() {
2415   transition(itos, vtos);
2416   Label loop_entry, loop, found, continue_execution;
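  // lookupswitch layout in the bytecode stream (after 4-byte alignment
  // padding): default offset, npairs, then npairs (match, offset) pairs,
  // all 32-bit big-endian; the 8-byte pair size is why the loop scales
  // the counter by times_8.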
2417   // bswap rax so we can avoid bswapping the table entries
2418   __ bswapl(rax);
2419   // align r13
2420   __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2421                                     // this instruction (change offsets
2422                                     // below)
2423   __ andptr(rbx, -BytesPerInt);
2424   // set counter
2425   __ movl(rcx, Address(rbx, BytesPerInt));
2426   __ bswapl(rcx);
2427   __ jmpb(loop_entry);
2428   // table search
2429   __ bind(loop);
2430   __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2431   __ jcc(Assembler::equal, found);
2432   __ bind(loop_entry);
2433   __ decrementl(rcx);
2434   __ jcc(Assembler::greaterEqual, loop);
2435   // default case
2436   __ profile_switch_default(rax);
2437   __ movl(rdx, Address(rbx, 0));
2438   __ jmp(continue_execution);
2439   // entry found -> get offset
2440   __ bind(found);
2441   __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2442   __ profile_switch_case(rcx, rax, rbx);
2443   // continue execution
2444   __ bind(continue_execution);
2445   __ bswapl(rdx);
2446   __ movl2ptr(rdx, rdx);
2447   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2448   __ addptr(rbcp, rdx);
2449   __ dispatch_only(vtos, true);
2450 }
2451 
2452 void TemplateTable::fast_binaryswitch() {
2453   transition(itos, vtos);
2454   // Implementation using the following core algorithm:
2455   //
2456   // int binary_search(int key, LookupswitchPair* array, int n) {
2457   //   // Binary search according to "Methodik des Programmierens" by
2458   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2459   //   int i = 0;
2460   //   int j = n;
2461   //   while (i+1 < j) {
2462   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2463   //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
2465   //     // element a[n] is infinitely big.
2466   //     int h = (i + j) >> 1;
2467   //     // i < h < j
2468   //     if (key < array[h].fast_match()) {
2469   //       j = h;
2470   //     } else {
2471   //       i = h;
2472   //     }
2473   //   }
2474   //   // R: a[i] <= key < a[i+1] or Q
2475   //   // (i.e., if key is within array, i is the correct index)
2476   //   return i;
2477   // }
2478 
2479   // Register allocation
2480   const Register key   = rax; // already set (tosca)
2481   const Register array = rbx;
2482   const Register i     = rcx;
2483   const Register j     = rdx;
2484   const Register h     = rdi;
2485   const Register temp  = rsi;
2486 
2487   // Find array start
2488   NOT_LP64(__ save_bcp());
2489 
2490   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2491                                           // get rid of this
2492                                           // instruction (change
2493                                           // offsets below)
2494   __ andptr(array, -BytesPerInt);
2495 
2496   // Initialize i & j
2497   __ xorl(i, i);                            // i = 0;
2498   __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2499 
2500   // Convert j into native byteordering
2501   __ bswapl(j);
2502 
2503   // And start
2504   Label entry;
2505   __ jmp(entry);
2506 
2507   // binary search loop
2508   {
2509     Label loop;
2510     __ bind(loop);
2511     // int h = (i + j) >> 1;
2512     __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2513     __ sarl(h, 1);                               // h = (i + j) >> 1;
2514     // if (key < array[h].fast_match()) {
2515     //   j = h;
2516     // } else {
2517     //   i = h;
2518     // }
2519     // Convert array[h].match to native byte-ordering before compare
2520     __ movl(temp, Address(array, h, Address::times_8));
2521     __ bswapl(temp);
2522     __ cmpl(key, temp);
2523     // j = h if (key <  array[h].fast_match())
2524     __ cmov32(Assembler::less, j, h);
2525     // i = h if (key >= array[h].fast_match())
2526     __ cmov32(Assembler::greaterEqual, i, h);
2527     // while (i+1 < j)
2528     __ bind(entry);
2529     __ leal(h, Address(i, 1)); // i+1
2530     __ cmpl(h, j);             // i+1 < j
2531     __ jcc(Assembler::less, loop);
2532   }
2533 
2534   // end of binary search, result index is i (must check again!)
2535   Label default_case;
2536   // Convert array[i].match to native byte-ordering before compare
2537   __ movl(temp, Address(array, i, Address::times_8));
2538   __ bswapl(temp);
2539   __ cmpl(key, temp);
2540   __ jcc(Assembler::notEqual, default_case);
2541 
2542   // entry found -> j = offset
  __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
2544   __ profile_switch_case(i, key, array);
2545   __ bswapl(j);
2546   LP64_ONLY(__ movslq(j, j));
2547 
2548   NOT_LP64(__ restore_bcp());
2549   NOT_LP64(__ restore_locals());                           // restore rdi
2550 
2551   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2552   __ addptr(rbcp, j);
2553   __ dispatch_only(vtos, true);
2554 
2555   // default case -> j = default offset
2556   __ bind(default_case);
2557   __ profile_switch_default(i);
2558   __ movl(j, Address(array, -2 * BytesPerInt));
2559   __ bswapl(j);
2560   LP64_ONLY(__ movslq(j, j));
2561 
2562   NOT_LP64(__ restore_bcp());
2563   NOT_LP64(__ restore_locals());
2564 
2565   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2566   __ addptr(rbcp, j);
2567   __ dispatch_only(vtos, true);
2568 }
2569 
2570 void TemplateTable::_return(TosState state) {
2571   transition(state, state);
2572 
2573   assert(_desc->calls_vm(),
2574          "inconsistent calls_vm information"); // call in remove_activation
2575 
2576   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2577     assert(state == vtos, "only valid state");
2578     Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2579     __ movptr(robj, aaddress(0));
2580     __ load_klass(rdi, robj, rscratch1);
2581     __ testb(Address(rdi, Klass::misc_flags_offset()), KlassFlags::_misc_has_finalizer);
2582     Label skip_register_finalizer;
2583     __ jcc(Assembler::zero, skip_register_finalizer);
2584 
2585     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2586 
2587     __ bind(skip_register_finalizer);
2588   }
2589 
2590   if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
2591     Label no_safepoint;
2592     NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
2593 #ifdef _LP64
2594     __ testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2595 #else
2596     const Register thread = rdi;
2597     __ get_thread(thread);
2598     __ testb(Address(thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
2599 #endif
2600     __ jcc(Assembler::zero, no_safepoint);
2601     __ push(state);
2602     __ push_cont_fastpath();
2603     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2604                                        InterpreterRuntime::at_safepoint));
2605     __ pop_cont_fastpath();
2606     __ pop(state);
2607     __ bind(no_safepoint);
2608   }
2609 
2610   // Narrow result if state is itos but result type is smaller.
2611   // Need to narrow in the return bytecode rather than in generate_return_entry
2612   // since compiled code callers expect the result to already be narrowed.
2613   if (state == itos) {
2614     __ narrow(rax);
2615   }
2616   __ remove_activation(state, rbcp);
2617 
2618   __ jmp(rbcp);
2619 }
2620 
2621 // ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs
2623 // in order.  Store buffers on most chips allow reads & writes to
2624 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2625 // without some kind of memory barrier (i.e., it's not sufficient that
2626 // the interpreter does not reorder volatile references, the hardware
2627 // also must not reorder them).
2628 //
2629 // According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized with respect to each other.  ALSO reads &
2631 //     writes act as acquire & release, so:
2632 // (2) A read cannot let unrelated NON-volatile memory refs that
2633 //     happen after the read float up to before the read.  It's OK for
2634 //     non-volatile memory refs that happen before the volatile read to
2635 //     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
2637 //     memory refs that happen BEFORE the write float down to after the
2638 //     write.  It's OK for non-volatile memory refs that happen after the
2639 //     volatile write to float up before it.
2640 //
2641 // We only put in barriers around volatile refs (they are expensive),
2642 // not _between_ memory refs (that would require us to track the
2643 // flavor of the previous memory refs).  Requirements (2) and (3)
2644 // require some barriers before volatile stores and after volatile
2645 // loads.  These nearly cover requirement (1) but miss the
2646 // volatile-store-volatile-load case.  This final case is placed after
2647 // volatile-stores although it could just as well go before
2648 // volatile-loads.
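//
// On x86 the TSO memory model already forbids every reordering except
// store-load, so in practice only a StoreLoad barrier after volatile
// stores must be emitted; volatile loads need no extra fence.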
2649 
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  // Helper to emit a memory barrier with the given ordering constraint;
  // callers perform any is-volatile test before invoking it.
2652   __ membar(order_constraint);
2653 }
2654 
2655 void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
2656                                                        Register cache,
2657                                                        Register index) {
2658   const Register temp = rbx;
2659   assert_different_registers(cache, index, temp);
2660 
2661   Label L_clinit_barrier_slow;
2662   Label resolved;
2663 
2664   Bytecodes::Code code = bytecode();
2665 
2666   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
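  // Two bytecodes can share one ResolvedMethodEntry: in this file
  // invokevirtual resolves through bytecode2 (f2_byte) while the other
  // invoke bytecodes use bytecode1 (f1_byte), so each records its own
  // resolution state.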
2667 
2668   __ load_method_entry(cache, index);
  switch (byte_no) {
2670     case f1_byte:
2671       __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
2672       break;
2673     case f2_byte:
2674       __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
2675       break;
2676     default:
2677       ShouldNotReachHere();
2678   }
2679   __ cmpl(temp, code);  // have we resolved this bytecode?
2680   __ jcc(Assembler::equal, resolved);
2681 
2682   // resolve first time through
2683   // Class initialization barrier slow path lands here as well.
2684   __ bind(L_clinit_barrier_slow);
2685   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2686   __ movl(temp, code);
2687   __ call_VM(noreg, entry, temp);
2688   // Update registers with resolved info
2689   __ load_method_entry(cache, index);
2690 
2691   __ bind(resolved);
2692 
2693   // Class initialization barrier for static methods
2694   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2695     const Register method = temp;
2696     const Register klass  = temp;
2697     const Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2698     assert(thread != noreg, "x86_32 not supported");
2699 
2700     __ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2701     __ load_method_holder(klass, method);
2702     __ clinit_barrier(klass, thread, nullptr /*L_fast_path*/, &L_clinit_barrier_slow);
2703   }
2704 }
2705 
2706 void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
2707                                             Register cache,
2708                                             Register index) {
2709   const Register temp = rbx;
2710   assert_different_registers(cache, index, temp);
2711 
2712   Label L_clinit_barrier_slow;
2713   Label resolved;
2714 
2715   Bytecodes::Code code = bytecode();
2716   switch (code) {
2717     case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2718     case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2719     default: break;
2720   }
2721 
2722   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2723   __ load_field_entry(cache, index);
2724   if (byte_no == f1_byte) {
2725     __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedFieldEntry::get_code_offset())));
2726   } else {
2727     __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedFieldEntry::put_code_offset())));
2728   }
2729   __ cmpl(temp, code);  // have we resolved this bytecode?
2730   __ jcc(Assembler::equal, resolved);
2731 
2732   // resolve first time through
2733   __ bind(L_clinit_barrier_slow);
2734   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2735   __ movl(temp, code);
2736   __ call_VM(noreg, entry, temp);
2737   // Update registers with resolved info
2738   __ load_field_entry(cache, index);
2739 
2740   __ bind(resolved);
2741 
2742   // Class initialization barrier for static fields
2743   if (VM_Version::supports_fast_class_init_checks() &&
2744       (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
2745     const Register field_holder = temp;
2746     const Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2747     assert(thread != noreg, "x86_32 not supported");
2748 
2749     __ movptr(field_holder, Address(cache, in_bytes(ResolvedFieldEntry::field_holder_offset())));
2750     __ clinit_barrier(field_holder, thread, nullptr /*L_fast_path*/, &L_clinit_barrier_slow);
2751   }
2752 }
2753 
2754 void TemplateTable::load_resolved_field_entry(Register obj,
2755                                               Register cache,
2756                                               Register tos_state,
2757                                               Register offset,
2758                                               Register flags,
2759                                               bool is_static = false) {
2760   assert_different_registers(cache, tos_state, flags, offset);
2761 
2762   // Field offset
2763   __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
2764 
2765   // Flags
2766   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())));
2767 
2768   // TOS state
2769   __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())));
2770 
2771   // Klass overwrite register
2772   if (is_static) {
2773     __ movptr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()));
2774     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2775     __ movptr(obj, Address(obj, mirror_offset));
2776     __ resolve_oop_handle(obj, rscratch2);
2777   }
2778 
2779 }
2780 
2781 void TemplateTable::load_invokedynamic_entry(Register method) {
2782   // setup registers
2783   const Register appendix = rax;
2784   const Register cache = rcx;
2785   const Register index = rdx;
2786   assert_different_registers(method, appendix, cache, index);
2787 
2788   __ save_bcp();
2789 
2790   Label resolved;
2791 
2792   __ load_resolved_indy_entry(cache, index);
2793   __ movptr(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2794 
2795   // Compare the method to zero
2796   __ testptr(method, method);
2797   __ jcc(Assembler::notZero, resolved);
2798 
2799   Bytecodes::Code code = bytecode();
2800 
2801   // Call to the interpreter runtime to resolve invokedynamic
2802   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2803   __ movl(method, code); // this is essentially Bytecodes::_invokedynamic
2804   __ call_VM(noreg, entry, method);
2805   // Update registers with resolved info
2806   __ load_resolved_indy_entry(cache, index);
2807   __ movptr(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2808 
2809 #ifdef ASSERT
2810   __ testptr(method, method);
2811   __ jcc(Assembler::notZero, resolved);
2812   __ stop("Should be resolved by now");
2813 #endif // ASSERT
2814   __ bind(resolved);
2815 
2816   Label L_no_push;
2817   // Check if there is an appendix
2818   __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
2819   __ testl(index, (1 << ResolvedIndyEntry::has_appendix_shift));
2820   __ jcc(Assembler::zero, L_no_push);
2821 
2822   // Get appendix
2823   __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
2824   // Push the appendix as a trailing parameter
2825   // since the parameter_size includes it.
2826   __ load_resolved_reference_at_index(appendix, index);
2827   __ verify_oop(appendix);
2828   __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
2829   __ bind(L_no_push);
2830 
2831   // compute return type
2832   __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
2833   // load return address
2834   {
2835     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2836     ExternalAddress table(table_addr);
2837 #ifdef _LP64
2838     __ lea(rscratch1, table);
2839     __ movptr(index, Address(rscratch1, index, Address::times_ptr));
2840 #else
2841     __ movptr(index, ArrayAddress(table, Address(noreg, index, Address::times_ptr)));
2842 #endif // _LP64
2843   }
2844 
2845   // push return address
2846   __ push(index);
2847 }
2848 
2849 void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
2850                                                                  Register method,
2851                                                                  Register flags) {
2852   // setup registers
2853   const Register index = rdx;
2854   assert_different_registers(cache, index);
2855   assert_different_registers(method, cache, flags);
2856 
2857   // determine constant pool cache field offsets
2858   resolve_cache_and_index_for_method(f1_byte, cache, index);
2859   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2860   __ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2861 }
2862 
2863 void TemplateTable::load_resolved_method_entry_handle(Register cache,
2864                                                Register method,
2865                                                Register ref_index,
2866                                                Register flags) {
2867   // setup registers
2868   const Register index = rdx;
2869   assert_different_registers(cache, index);
2870   assert_different_registers(cache, method, ref_index, flags);
2871 
2872   // determine constant pool cache field offsets
2873   resolve_cache_and_index_for_method(f1_byte, cache, index);
2874   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2875 
2876   // Maybe push appendix
2877   Label L_no_push;
2878   __ testl(flags, (1 << ResolvedMethodEntry::has_appendix_shift));
2879   __ jcc(Assembler::zero, L_no_push);
2880   // invokehandle uses an index into the resolved references array
2881   __ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
2882   // Push the appendix as a trailing parameter.
2883   // This must be done before we get the receiver,
2884   // since the parameter_size includes it.
2885   Register appendix = method;
2886   __ load_resolved_reference_at_index(appendix, ref_index);
2887   __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
2888   __ bind(L_no_push);
2889 
2890   __ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2891 }
2892 
2893 void TemplateTable::load_resolved_method_entry_interface(Register cache,
2894                                                          Register klass,
2895                                                          Register method_or_table_index,
2896                                                          Register flags) {
2897   // setup registers
2898   const Register index = rdx;
2899   assert_different_registers(cache, klass, method_or_table_index, flags);
2900 
2901   // determine constant pool cache field offsets
2902   resolve_cache_and_index_for_method(f1_byte, cache, index);
2903   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2904 
2905   // Invokeinterface can behave in different ways:
2906   // If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
2907   // behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
2908   // vtable index is placed in the register.
2909   // Otherwise, the registers will be populated with the klass and method.
2910 
2911   Label NotVirtual; Label NotVFinal; Label Done;
2912   __ testl(flags, 1 << ResolvedMethodEntry::is_forced_virtual_shift);
2913   __ jcc(Assembler::zero, NotVirtual);
2914   __ testl(flags, (1 << ResolvedMethodEntry::is_vfinal_shift));
2915   __ jcc(Assembler::zero, NotVFinal);
2916   __ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2917   __ jmp(Done);
2918 
2919   __ bind(NotVFinal);
2920   __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2921   __ jmp(Done);
2922 
2923   __ bind(NotVirtual);
2924   __ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2925   __ movptr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
2926   __ bind(Done);
2927 }
2928 
2929 void TemplateTable::load_resolved_method_entry_virtual(Register cache,
2930                                                        Register method_or_table_index,
2931                                                        Register flags) {
2932   // setup registers
2933   const Register index = rdx;
2934   assert_different_registers(index, cache);
2935   assert_different_registers(method_or_table_index, cache, flags);
2936 
2937   // determine constant pool cache field offsets
2938   resolve_cache_and_index_for_method(f2_byte, cache, index);
2939   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2940 
2941   // method_or_table_index can either be an itable index or a method depending on the virtual final flag
2942   Label isVFinal; Label Done;
2943   __ testl(flags, (1 << ResolvedMethodEntry::is_vfinal_shift));
2944   __ jcc(Assembler::notZero, isVFinal);
2945   __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2946   __ jmp(Done);
2947   __ bind(isVFinal);
2948   __ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2949   __ bind(Done);
2950 }
2951 
// The cache and index registers are expected to be set before the call.
// The correct values of the cache and index registers are preserved.
2954 void TemplateTable::jvmti_post_field_access(Register cache,
2955                                             Register index,
2956                                             bool is_static,
2957                                             bool has_tos) {
2958   if (JvmtiExport::can_post_field_access()) {
2959     // Check to see if a field access watch has been set before we take
2960     // the time to call into the VM.
2961     Label L1;
2962     assert_different_registers(cache, index, rax);
2963     __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rax, rax);
2965     __ jcc(Assembler::zero, L1);
2966 
2967     // cache entry pointer
2968     __ load_field_entry(cache, index);
2969     if (is_static) {
2970       __ xorptr(rax, rax);      // null object reference
2971     } else {
2972       __ pop(atos);         // Get the object
2973       __ verify_oop(rax);
2974       __ push(atos);        // Restore stack state
2975     }
    // rax:    object pointer or null
2977     // cache: cache entry pointer
2978     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2979               rax, cache);
2980 
2981     __ load_field_entry(cache, index);
2982     __ bind(L1);
2983   }
2984 }
2985 
2986 void TemplateTable::pop_and_check_object(Register r) {
2987   __ pop_ptr(r);
2988   __ null_check(r);  // for field access must check obj.
2989   __ verify_oop(r);
2990 }
2991 
2992 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2993   transition(vtos, vtos);
2994 
2995   const Register obj   = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2996   const Register cache = rcx;
2997   const Register index = rdx;
2998   const Register off   = rbx;
2999   const Register tos_state   = rax;
3000   const Register flags = rdx;
3001   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
3002 
3003   resolve_cache_and_index_for_field(byte_no, cache, index);
3004   jvmti_post_field_access(cache, index, is_static, false);
3005   load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
3006 
3007   if (!is_static) pop_and_check_object(obj);
3008 
3009   const Address field(obj, off, Address::times_1, 0*wordSize);
3010 
3011   Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
3012 
  // btos == 0, so a plain zero test identifies the btos case below
  assert(btos == 0, "change code, btos != 0");
3015   __ testl(tos_state, tos_state);
3016   __ jcc(Assembler::notZero, notByte);
3017 
3018   // btos
3019   __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3020   __ push(btos);
3021   // Rewrite bytecode to be faster
3022   if (!is_static && rc == may_rewrite) {
3023     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
3024   }
3025   __ jmp(Done);
3026 
3027   __ bind(notByte);
3028   __ cmpl(tos_state, ztos);
3029   __ jcc(Assembler::notEqual, notBool);
3030 
3031   // ztos (same code as btos)
3032   __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
3033   __ push(ztos);
3034   // Rewrite bytecode to be faster
3035   if (!is_static && rc == may_rewrite) {
    // use btos rewriting; no truncation to the t/f bit is needed for getfield.
3037     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
3038   }
3039   __ jmp(Done);
3040 
3041   __ bind(notBool);
3042   __ cmpl(tos_state, atos);
3043   __ jcc(Assembler::notEqual, notObj);
3044   // atos
3045   do_oop_load(_masm, field, rax);
3046   __ push(atos);
3047   if (!is_static && rc == may_rewrite) {
3048     patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
3049   }
3050   __ jmp(Done);
3051 
3052   __ bind(notObj);
3053   __ cmpl(tos_state, itos);
3054   __ jcc(Assembler::notEqual, notInt);
3055   // itos
3056   __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3057   __ push(itos);
3058   // Rewrite bytecode to be faster
3059   if (!is_static && rc == may_rewrite) {
3060     patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
3061   }
3062   __ jmp(Done);
3063 
3064   __ bind(notInt);
3065   __ cmpl(tos_state, ctos);
3066   __ jcc(Assembler::notEqual, notChar);
3067   // ctos
3068   __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3069   __ push(ctos);
3070   // Rewrite bytecode to be faster
3071   if (!is_static && rc == may_rewrite) {
3072     patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
3073   }
3074   __ jmp(Done);
3075 
3076   __ bind(notChar);
3077   __ cmpl(tos_state, stos);
3078   __ jcc(Assembler::notEqual, notShort);
3079   // stos
3080   __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3081   __ push(stos);
3082   // Rewrite bytecode to be faster
3083   if (!is_static && rc == may_rewrite) {
3084     patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
3085   }
3086   __ jmp(Done);
3087 
3088   __ bind(notShort);
3089   __ cmpl(tos_state, ltos);
3090   __ jcc(Assembler::notEqual, notLong);
3091   // ltos
  // Generate code as if volatile (x86_32). There just aren't enough registers to
  // save that information and this code is faster than the test.
3094   __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
3095   __ push(ltos);
3096   // Rewrite bytecode to be faster
3097   LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
3098   __ jmp(Done);
3099 
3100   __ bind(notLong);
3101   __ cmpl(tos_state, ftos);
3102   __ jcc(Assembler::notEqual, notFloat);
3103   // ftos
3104 
3105   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3106   __ push(ftos);
3107   // Rewrite bytecode to be faster
3108   if (!is_static && rc == may_rewrite) {
3109     patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
3110   }
3111   __ jmp(Done);
3112 
3113   __ bind(notFloat);
3114 #ifdef ASSERT
3115   Label notDouble;
3116   __ cmpl(tos_state, dtos);
3117   __ jcc(Assembler::notEqual, notDouble);
3118 #endif
3119   // dtos
  // MO_RELAXED: even for a volatile field this adds no extra work in the underlying implementation
3121   __ access_load_at(T_DOUBLE, IN_HEAP | MO_RELAXED, noreg /* dtos */, field, noreg, noreg);
3122   __ push(dtos);
3123   // Rewrite bytecode to be faster
3124   if (!is_static && rc == may_rewrite) {
3125     patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
3126   }
3127 #ifdef ASSERT
3128   __ jmp(Done);
3129 
3130   __ bind(notDouble);
3131   __ stop("Bad state");
3132 #endif
3133 
3134   __ bind(Done);
3135   // [jk] not needed currently
3136   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
3137   //                                              Assembler::LoadStore));
3138 }
3139 
3140 void TemplateTable::getfield(int byte_no) {
3141   getfield_or_static(byte_no, false);
3142 }
3143 
3144 void TemplateTable::nofast_getfield(int byte_no) {
3145   getfield_or_static(byte_no, false, may_not_rewrite);
3146 }
3147 
3148 void TemplateTable::getstatic(int byte_no) {
3149   getfield_or_static(byte_no, true);
3150 }
3151 
3152 
3153 // The cache and index registers are expected to be set before this call.
3154 // The function may destroy various registers, but not the cache and index registers.
3155 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
3156   // Cache is rcx and index is rdx
3157   const Register entry = LP64_ONLY(c_rarg2) NOT_LP64(rax); // ResolvedFieldEntry
3158   const Register obj = LP64_ONLY(c_rarg1) NOT_LP64(rbx);   // Object pointer
3159   const Register value = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // JValue object
3160 
3161   if (JvmtiExport::can_post_field_modification()) {
3162     // Check to see if a field modification watch has been set before
3163     // we take the time to call into the VM.
3164     Label L1;
3165     assert_different_registers(cache, obj, rax);
3166     __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3167     __ testl(rax, rax);
3168     __ jcc(Assembler::zero, L1);
3169 
3170     __ mov(entry, cache);
3171 
3172     if (is_static) {
3173       // Life is simple.  Null out the object pointer.
3174       __ xorl(obj, obj);
3175 
3176     } else {
3177       // Life is harder. The stack holds the value on top, followed by
3178       // the object.  We don't know the size of the value, though; it
3179       // could be one or two words depending on its type. As a result,
3180       // we must find the type to determine where the object is.
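           // For example, for "obj.f = v" the expression stack just before
           // putfield is (top of stack at the lowest address):
           //   [ v (one word) ] <- rsp        [ v-lo      ] <- rsp
           //   [ objectref    ]        or     [ v-hi      ]
           //                                  [ objectref ]
           // so the objectref is one slot below tos for one-word values and
           // two slots below for ltos/dtos.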
3181 #ifndef _LP64
3182       Label two_word, valsize_known;
3183 #endif
3184       __ load_unsigned_byte(value, Address(entry, in_bytes(ResolvedFieldEntry::type_offset())));
3185 #ifdef _LP64
3186       __ movptr(obj, at_tos_p1());  // initially assume a one word jvalue
3187       __ cmpl(value, ltos);
3188       __ cmovptr(Assembler::equal,
3189                  obj, at_tos_p2()); // ltos (two word jvalue)
3190       __ cmpl(value, dtos);
3191       __ cmovptr(Assembler::equal,
3192                  obj, at_tos_p2()); // dtos (two word jvalue)
3193 #else
3194       __ mov(obj, rsp);
3195       __ cmpl(value, ltos);
3196       __ jccb(Assembler::equal, two_word);
3197       __ cmpl(value, dtos);
3198       __ jccb(Assembler::equal, two_word);
3199       __ addptr(obj, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
3200       __ jmpb(valsize_known);
3201 
3202       __ bind(two_word);
3203       __ addptr(obj, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
3204 
3205       __ bind(valsize_known);
3206       // setup object pointer
3207       __ movptr(obj, Address(obj, 0));
3208 #endif
3209     }
3210 
3211     // object (tos)
3212     __ mov(value, rsp);
3213     // obj: object pointer set up above (null if static)
3214     // entry: field entry pointer
3215     // value: jvalue object on the stack
3216     __ call_VM(noreg,
3217               CAST_FROM_FN_PTR(address,
3218                               InterpreterRuntime::post_field_modification),
3219               obj, entry, value);
3220     // Reload field entry
3221     __ load_field_entry(cache, index);
3222     __ bind(L1);
3223   }
3224 }
3225 
3226 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3227   transition(vtos, vtos);
3228 
3229   const Register obj = rcx;
3230   const Register cache = rcx;
3231   const Register index = rdx;
3232   const Register tos_state   = rdx;
3233   const Register off   = rbx;
3234   const Register flags = rax;
3235 
3236   resolve_cache_and_index_for_field(byte_no, cache, index);
3237   jvmti_post_field_mod(cache, index, is_static);
3238   load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
3239 
3240   // [jk] not needed currently
3241   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3242   //                                              Assembler::StoreStore));
3243 
3244   Label notVolatile, Done;
3245 
3246   // Check for volatile store
3247   __ andl(flags, (1 << ResolvedFieldEntry::is_volatile_shift));
3248   __ testl(flags, flags);
3249   __ jcc(Assembler::zero, notVolatile);
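       // A volatile store must be followed by a StoreLoad barrier so that a
       // later volatile load cannot float above it; on x86 (TSO) StoreLoad is
       // the only ordering in the mask below that needs an actual fence
       // instruction.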
3250 
3251   putfield_or_static_helper(byte_no, is_static, rc, obj, off, tos_state);
3252   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3253                                                Assembler::StoreStore));
3254   __ jmp(Done);
3255   __ bind(notVolatile);
3256 
3257   putfield_or_static_helper(byte_no, is_static, rc, obj, off, tos_state);
3258 
3259   __ bind(Done);
3260 }
3261 
3262 void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc,
3263                                               Register obj, Register off, Register tos_state) {
3264 
3265   // field addresses
3266   const Address field(obj, off, Address::times_1, 0*wordSize);
3267   NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3268 
3269   Label notByte, notBool, notInt, notShort, notChar,
3270         notLong, notFloat, notObj;
3271   Label Done;
3272 
3273   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3274 
3275   // Test TOS state
3276   __ testl(tos_state, tos_state);
3277   __ jcc(Assembler::notZero, notByte);
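       // btos is 0 in the TosState enum, so a plain zero test stands in for
       // cmpl(tos_state, btos); the remaining states are compared explicitly below.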
3278 
3279   // btos
3280   {
3281     __ pop(btos);
3282     if (!is_static) pop_and_check_object(obj);
3283     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg, noreg);
3284     if (!is_static && rc == may_rewrite) {
3285       patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3286     }
3287     __ jmp(Done);
3288   }
3289 
3290   __ bind(notByte);
3291   __ cmpl(tos_state, ztos);
3292   __ jcc(Assembler::notEqual, notBool);
3293 
3294   // ztos
3295   {
3296     __ pop(ztos);
3297     if (!is_static) pop_and_check_object(obj);
3298     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg, noreg);
3299     if (!is_static && rc == may_rewrite) {
3300       patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
3301     }
3302     __ jmp(Done);
3303   }
3304 
3305   __ bind(notBool);
3306   __ cmpl(tos_state, atos);
3307   __ jcc(Assembler::notEqual, notObj);
3308 
3309   // atos
3310   {
3311     __ pop(atos);
3312     if (!is_static) pop_and_check_object(obj);
3313     // Store into the field
3314     do_oop_store(_masm, field, rax);
3315     if (!is_static && rc == may_rewrite) {
3316       patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3317     }
3318     __ jmp(Done);
3319   }
3320 
3321   __ bind(notObj);
3322   __ cmpl(tos_state, itos);
3323   __ jcc(Assembler::notEqual, notInt);
3324 
3325   // itos
3326   {
3327     __ pop(itos);
3328     if (!is_static) pop_and_check_object(obj);
3329     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg, noreg);
3330     if (!is_static && rc == may_rewrite) {
3331       patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3332     }
3333     __ jmp(Done);
3334   }
3335 
3336   __ bind(notInt);
3337   __ cmpl(tos_state, ctos);
3338   __ jcc(Assembler::notEqual, notChar);
3339 
3340   // ctos
3341   {
3342     __ pop(ctos);
3343     if (!is_static) pop_and_check_object(obj);
3344     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg, noreg);
3345     if (!is_static && rc == may_rewrite) {
3346       patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3347     }
3348     __ jmp(Done);
3349   }
3350 
3351   __ bind(notChar);
3352   __ cmpl(tos_state, stos);
3353   __ jcc(Assembler::notEqual, notShort);
3354 
3355   // stos
3356   {
3357     __ pop(stos);
3358     if (!is_static) pop_and_check_object(obj);
3359     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg, noreg);
3360     if (!is_static && rc == may_rewrite) {
3361       patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3362     }
3363     __ jmp(Done);
3364   }
3365 
3366   __ bind(notShort);
3367   __ cmpl(tos_state, ltos);
3368   __ jcc(Assembler::notEqual, notLong);
3369 
3370   // ltos
3371   {
3372     __ pop(ltos);
3373     if (!is_static) pop_and_check_object(obj);
3374     // MO_RELAXED: generates an atomic store even for a volatile field (important on x86_32, where a plain 64-bit store is not atomic)
3375     __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos*/, noreg, noreg, noreg);
3376 #ifdef _LP64
3377     if (!is_static && rc == may_rewrite) {
3378       patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3379     }
3380 #endif // _LP64
3381     __ jmp(Done);
3382   }
3383 
3384   __ bind(notLong);
3385   __ cmpl(tos_state, ftos);
3386   __ jcc(Assembler::notEqual, notFloat);
3387 
3388   // ftos
3389   {
3390     __ pop(ftos);
3391     if (!is_static) pop_and_check_object(obj);
3392     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3393     if (!is_static && rc == may_rewrite) {
3394       patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3395     }
3396     __ jmp(Done);
3397   }
3398 
3399   __ bind(notFloat);
3400 #ifdef ASSERT
3401   Label notDouble;
3402   __ cmpl(tos_state, dtos);
3403   __ jcc(Assembler::notEqual, notDouble);
3404 #endif
3405 
3406   // dtos
3407   {
3408     __ pop(dtos);
3409     if (!is_static) pop_and_check_object(obj);
3410     // MO_RELAXED: even for a volatile field this adds no extra work in the underlying implementation
3411     __ access_store_at(T_DOUBLE, IN_HEAP | MO_RELAXED, field, noreg /* dtos */, noreg, noreg, noreg);
3412     if (!is_static && rc == may_rewrite) {
3413       patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3414     }
3415   }
3416 
3417 #ifdef ASSERT
3418   __ jmp(Done);
3419 
3420   __ bind(notDouble);
3421   __ stop("Bad state");
3422 #endif
3423 
3424   __ bind(Done);
3425 }
3426 
3427 void TemplateTable::putfield(int byte_no) {
3428   putfield_or_static(byte_no, false);
3429 }
3430 
3431 void TemplateTable::nofast_putfield(int byte_no) {
3432   putfield_or_static(byte_no, false, may_not_rewrite);
3433 }
3434 
3435 void TemplateTable::putstatic(int byte_no) {
3436   putfield_or_static(byte_no, true);
3437 }
3438 
3439 void TemplateTable::jvmti_post_fast_field_mod() {
3440 
3441   const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3442 
3443   if (JvmtiExport::can_post_field_modification()) {
3444     // Check to see if a field modification watch has been set before
3445     // we take the time to call into the VM.
3446     Label L2;
3447     __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3448     __ testl(scratch, scratch);
3449     __ jcc(Assembler::zero, L2);
3450     __ pop_ptr(rbx);                  // copy the object pointer from tos
3451     __ verify_oop(rbx);
3452     __ push_ptr(rbx);                 // put the object pointer back on tos
3453     // Save tos values before call_VM() clobbers them. Since we have
3454     // to do it for every data type, we use the saved values as the
3455     // jvalue object.
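         // After the push, rsp points at a jvalue-compatible image of the
         // value: every jvalue union member lives at offset 0, so passing rsp
         // (via scratch below) as the JValue* is sufficient.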
3456     switch (bytecode()) {          // load values into the jvalue object
3457     case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3458     case Bytecodes::_fast_bputfield: // fall through
3459     case Bytecodes::_fast_zputfield: // fall through
3460     case Bytecodes::_fast_sputfield: // fall through
3461     case Bytecodes::_fast_cputfield: // fall through
3462     case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3463     case Bytecodes::_fast_dputfield: __ push(dtos); break;
3464     case Bytecodes::_fast_fputfield: __ push(ftos); break;
3465     case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3466 
3467     default:
3468       ShouldNotReachHere();
3469     }
3470     __ mov(scratch, rsp);             // points to jvalue on the stack
3471     // access constant pool cache entry
3472     LP64_ONLY(__ load_field_entry(c_rarg2, rax));
3473     NOT_LP64(__ load_field_entry(rax, rdx));
3474     __ verify_oop(rbx);
3475     // rbx: object pointer copied above
3476     // c_rarg2: cache entry pointer
3477     // c_rarg3: jvalue object on the stack
3478     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3479     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3480 
3481     switch (bytecode()) {             // restore tos values
3482     case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3483     case Bytecodes::_fast_bputfield: // fall through
3484     case Bytecodes::_fast_zputfield: // fall through
3485     case Bytecodes::_fast_sputfield: // fall through
3486     case Bytecodes::_fast_cputfield: // fall through
3487     case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3488     case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3489     case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3490     case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3491     default: break;
3492     }
3493     __ bind(L2);
3494   }
3495 }
3496 
3497 void TemplateTable::fast_storefield(TosState state) {
3498   transition(state, vtos);
3499 
3500   Register cache = rcx;
3501 
3502   Label notVolatile, Done;
3503 
3504   jvmti_post_fast_field_mod();
3505 
3506   __ push(rax);  // save the TOS value; rax is clobbered while loading the field entry
3507   __ load_field_entry(rcx, rax);
3508   load_resolved_field_entry(noreg, cache, rax, rbx, rdx);
3509   // RBX: field offset, RAX: TOS, RDX: flags
3510   __ andl(rdx, (1 << ResolvedFieldEntry::is_volatile_shift));
3511   __ pop(rax);
3512 
3513   // Get object from stack
3514   pop_and_check_object(rcx);
3515 
3516   // field address
3517   const Address field(rcx, rbx, Address::times_1);
3518 
3519   // Check for volatile store
3520   __ testl(rdx, rdx);
3521   __ jcc(Assembler::zero, notVolatile);
3522 
3523   fast_storefield_helper(field, rax);
3524   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3525                                                Assembler::StoreStore));
3526   __ jmp(Done);
3527   __ bind(notVolatile);
3528 
3529   fast_storefield_helper(field, rax);
3530 
3531   __ bind(Done);
3532 }
3533 
3534 void TemplateTable::fast_storefield_helper(Address field, Register rax) {
3535 
3536   // access field
3537   switch (bytecode()) {
3538   case Bytecodes::_fast_aputfield:
3539     do_oop_store(_masm, field, rax);
3540     break;
3541   case Bytecodes::_fast_lputfield:
3542 #ifdef _LP64
3543     __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg, noreg);
3544 #else
3545     __ stop("should not be rewritten");
3546 #endif
3547     break;
3548   case Bytecodes::_fast_iputfield:
3549     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg, noreg);
3550     break;
3551   case Bytecodes::_fast_zputfield:
3552     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg, noreg);
3553     break;
3554   case Bytecodes::_fast_bputfield:
3555     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg, noreg);
3556     break;
3557   case Bytecodes::_fast_sputfield:
3558     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg, noreg);
3559     break;
3560   case Bytecodes::_fast_cputfield:
3561     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg, noreg);
3562     break;
3563   case Bytecodes::_fast_fputfield:
3564     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3565     break;
3566   case Bytecodes::_fast_dputfield:
3567     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
3568     break;
3569   default:
3570     ShouldNotReachHere();
3571   }
3572 }
3573 
3574 void TemplateTable::fast_accessfield(TosState state) {
3575   transition(atos, state);
3576 
3577   // Do the JVMTI work here to avoid disturbing the register state below
3578   if (JvmtiExport::can_post_field_access()) {
3579     // Check to see if a field access watch has been set before we
3580     // take the time to call into the VM.
3581     Label L1;
3582     __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3583     __ testl(rcx, rcx);
3584     __ jcc(Assembler::zero, L1);
3585     // access constant pool cache entry
3586     LP64_ONLY(__ load_field_entry(c_rarg2, rcx));
3587     NOT_LP64(__ load_field_entry(rcx, rdx));
3588     __ verify_oop(rax);
3589     __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
3590     LP64_ONLY(__ mov(c_rarg1, rax));
3591     // c_rarg1: object pointer copied above
3592     // c_rarg2: cache entry pointer
3593     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
3594     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
3595     __ pop_ptr(rax); // restore object pointer
3596     __ bind(L1);
3597   }
3598 
3599   // access constant pool cache
3600   __ load_field_entry(rcx, rbx);
3601   __ load_sized_value(rbx, Address(rcx, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3602 
3603   // rax: object
3604   __ verify_oop(rax);
3605   __ null_check(rax);
3606   Address field(rax, rbx, Address::times_1);
3607 
3608   // access field
3609   switch (bytecode()) {
3610   case Bytecodes::_fast_agetfield:
3611     do_oop_load(_masm, field, rax);
3612     __ verify_oop(rax);
3613     break;
3614   case Bytecodes::_fast_lgetfield:
3615 #ifdef _LP64
3616     __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
3617 #else
3618     __ stop("should not be rewritten");
3619 #endif
3620     break;
3621   case Bytecodes::_fast_igetfield:
3622     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3623     break;
3624   case Bytecodes::_fast_bgetfield:
3625     __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3626     break;
3627   case Bytecodes::_fast_sgetfield:
3628     __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3629     break;
3630   case Bytecodes::_fast_cgetfield:
3631     __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3632     break;
3633   case Bytecodes::_fast_fgetfield:
3634     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3635     break;
3636   case Bytecodes::_fast_dgetfield:
3637     __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3638     break;
3639   default:
3640     ShouldNotReachHere();
3641   }
3642   // [jk] not needed currently
3643   //   Label notVolatile;
3644   //   __ testl(rdx, rdx);
3645   //   __ jcc(Assembler::zero, notVolatile);
3646   //   __ membar(Assembler::LoadLoad);
3647   //   __ bind(notVolatile);
3648 }
3649 
3650 void TemplateTable::fast_xaccess(TosState state) {
3651   transition(vtos, state);
3652 
3653   // get receiver
3654   __ movptr(rax, aaddress(0));
3655   // access constant pool cache
3656   __ load_field_entry(rcx, rdx, 2);
3657   __ load_sized_value(rbx, Address(rcx, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
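       // The field entry index is read at bcp + 2 because this template stands
       // in for the pair "aload_0; getfield #index": rbcp still points at the
       // rewritten aload_0, so the getfield's operand bytes sit two bytes on.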
3658 
3659   // make sure exception is reported in correct bcp range (getfield is
3660   // next instruction)
3661   __ increment(rbcp);
3662   __ null_check(rax);
3663   const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
3664   switch (state) {
3665   case itos:
3666     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3667     break;
3668   case atos:
3669     do_oop_load(_masm, field, rax);
3670     __ verify_oop(rax);
3671     break;
3672   case ftos:
3673     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3674     break;
3675   default:
3676     ShouldNotReachHere();
3677   }
3678 
3679   // [jk] not needed currently
3680   // Label notVolatile;
3681   // __ movl(rdx, Address(rcx, rdx, Address::times_8,
3682   //                      in_bytes(ConstantPoolCache::base_offset() +
3683   //                               ConstantPoolCacheEntry::flags_offset())));
3684   // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3685   // __ testl(rdx, 0x1);
3686   // __ jcc(Assembler::zero, notVolatile);
3687   // __ membar(Assembler::LoadLoad);
3688   // __ bind(notVolatile);
3689 
3690   __ decrement(rbcp);
3691 }
3692 
3693 //-----------------------------------------------------------------------------
3694 // Calls
3695 
3696 void TemplateTable::prepare_invoke(Register cache, Register recv, Register flags) {
3697   // determine flags
3698   const Bytecodes::Code code = bytecode();
3699   const bool load_receiver       = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);
3700   assert_different_registers(recv, flags);
3701 
3702   // save 'interpreter return address'
3703   __ save_bcp();
3704 
3705   // Save flags and load TOS
3706   __ movl(rbcp, flags);
3707   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));
3708 
3709   // load receiver if needed (after appendix is pushed so parameter size is correct)
3710   // Note: no return address pushed yet
3711   if (load_receiver) {
3712     __ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
3713     const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
3714     const int receiver_is_at_end      = -1;  // back off one slot to get receiver
3715     Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3716     __ movptr(recv, recv_addr);
3717     __ verify_oop(recv);
3718   }
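       // The receiver occupies the deepest parameter slot: e.g. for "s.concat(t)"
       // num_parameters == 2, t is at tos and s one slot below it, and the two
       // -1 corrections above make argument_address land on s.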
3719 
3720   // load return address
3721   {
3722     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3723     ExternalAddress table(table_addr);
3724 #ifdef _LP64
3725     __ lea(rscratch1, table);
3726     __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
3727 #else
3728     __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
3729 #endif // _LP64
3730   }
3731 
3732   // push return address
3733   __ push(flags);
3734 
3735   // Restore the flags value parked in rbcp above, then restore rbcp itself
3736   // (rsi on x86_32, r13 on x86_64) for later use as the bytecode pointer.
3737   __ movl(flags, rbcp);
3738   __ restore_bcp();
3739 }
3740 
3741 void TemplateTable::invokevirtual_helper(Register index,
3742                                          Register recv,
3743                                          Register flags) {
3744   // Uses temporary registers rax, rdx
3745   assert_different_registers(index, recv, rax, rdx);
3746   assert(index == rbx, "");
3747   assert(recv  == rcx, "");
3748 
3749   // Test for an invoke of a final method
3750   Label notFinal;
3751   __ movl(rax, flags);
3752   __ andl(rax, (1 << ResolvedMethodEntry::is_vfinal_shift));
3753   __ jcc(Assembler::zero, notFinal);
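       // With is_vfinal set, the resolved entry holds the Method* directly
       // rather than a vtable index, so no dispatch through the receiver's
       // vtable is needed below.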
3754 
3755   const Register method = index;  // method must be rbx
3756   assert(method == rbx,
3757          "Method* must be rbx for interpreter calling convention");
3758 
3759   // do the call - the index is actually the method to call
3760   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3761 
3762   // It's final, need a null check here!
3763   __ null_check(recv);
3764 
3765   // profile this call
3766   __ profile_final_call(rax);
3767   __ profile_arguments_type(rax, method, rbcp, true);
3768 
3769   __ jump_from_interpreted(method, rax);
3770 
3771   __ bind(notFinal);
3772 
3773   // get receiver klass
3774   __ load_klass(rax, recv, rscratch1);
3775 
3776   // profile this call
3777   __ profile_virtual_call(rax, rlocals, rdx);
3778   // get target Method* & entry point
3779   __ lookup_virtual_method(rax, index, method);
3780 
3781   __ profile_arguments_type(rdx, method, rbcp, true);
3782   __ jump_from_interpreted(method, rdx);
3783 }
3784 
3785 void TemplateTable::invokevirtual(int byte_no) {
3786   transition(vtos, vtos);
3787   assert(byte_no == f2_byte, "use this argument");
3788 
3789   load_resolved_method_entry_virtual(rcx,  // ResolvedMethodEntry*
3790                                      rbx,  // Method or itable index
3791                                      rdx); // Flags
3792   prepare_invoke(rcx,  // ResolvedMethodEntry*
3793                  rcx,  // Receiver
3794                  rdx); // flags
3795 
3796   // rbx: index
3797   // rcx: receiver
3798   // rdx: flags
3799   invokevirtual_helper(rbx, rcx, rdx);
3800 }
3801 
3802 void TemplateTable::invokespecial(int byte_no) {
3803   transition(vtos, vtos);
3804   assert(byte_no == f1_byte, "use this argument");
3805 
3806   load_resolved_method_entry_special_or_static(rcx,  // ResolvedMethodEntry*
3807                                                rbx,  // Method*
3808                                                rdx); // flags
3809   prepare_invoke(rcx,
3810                  rcx,  // get receiver also for null check
3811                  rdx); // flags
3812 
3813   __ verify_oop(rcx);
3814   __ null_check(rcx);
3815   // do the call
3816   __ profile_call(rax);
3817   __ profile_arguments_type(rax, rbx, rbcp, false);
3818   __ jump_from_interpreted(rbx, rax);
3819 }
3820 
3821 void TemplateTable::invokestatic(int byte_no) {
3822   transition(vtos, vtos);
3823   assert(byte_no == f1_byte, "use this argument");
3824 
3825   load_resolved_method_entry_special_or_static(rcx, // ResolvedMethodEntry*
3826                                                rbx, // Method*
3827                                                rdx  // flags
3828                                                );
3829   prepare_invoke(rcx, rcx, rdx);  // cache, receiver (unused for static calls), flags
3830 
3831   // do the call
3832   __ profile_call(rax);
3833   __ profile_arguments_type(rax, rbx, rbcp, false);
3834   __ jump_from_interpreted(rbx, rax);
3835 }
3836 
3837 
3838 void TemplateTable::fast_invokevfinal(int byte_no) {
3839   transition(vtos, vtos);
3840   assert(byte_no == f2_byte, "use this argument");
3841   __ stop("fast_invokevfinal not used on x86");
3842 }
3843 
3844 
3845 void TemplateTable::invokeinterface(int byte_no) {
3846   transition(vtos, vtos);
3847   assert(byte_no == f1_byte, "use this argument");
3848 
3849   load_resolved_method_entry_interface(rcx,  // ResolvedMethodEntry*
3850                                        rax,  // Klass*
3851                                        rbx,  // Method* or itable/vtable index
3852                                        rdx); // flags
3853   prepare_invoke(rcx, rcx, rdx); // receiver, flags
3854 
3855   // First check for Object case, then private interface method,
3856   // then regular interface method.
3857 
3858   // Special case of invokeinterface called for virtual method of
3859   // java.lang.Object.  See cpCache.cpp for details.
3860   Label notObjectMethod;
3861   __ movl(rlocals, rdx);
3862   __ andl(rlocals, (1 << ResolvedMethodEntry::is_forced_virtual_shift));
3863   __ jcc(Assembler::zero, notObjectMethod);
3864 
3865   invokevirtual_helper(rbx, rcx, rdx);
3866   // no return from above
3867   __ bind(notObjectMethod);
3868 
3869   Label no_such_interface; // for receiver subtype check
3870   Register recvKlass; // used for exception processing
3871 
3872   // Check for private method invocation - indicated by vfinal
3873   Label notVFinal;
3874   __ movl(rlocals, rdx);
3875   __ andl(rlocals, (1 << ResolvedMethodEntry::is_vfinal_shift));
3876   __ jcc(Assembler::zero, notVFinal);
3877 
3878   // Get receiver klass into rlocals - also a null check
3879   __ load_klass(rlocals, rcx, rscratch1);
3880 
3881   Label subtype;
3882   __ check_klass_subtype(rlocals, rax, rbcp, subtype);
3883   // If we get here the typecheck failed
3884   recvKlass = rdx;
3885   __ mov(recvKlass, rlocals); // shuffle receiver class for exception use
3886   __ jmp(no_such_interface);
3887 
3888   __ bind(subtype);
3889 
3890   // do the call - rbx is actually the method to call
3891 
3892   __ profile_final_call(rdx);
3893   __ profile_arguments_type(rdx, rbx, rbcp, true);
3894 
3895   __ jump_from_interpreted(rbx, rdx);
3896   // no return from above
3897   __ bind(notVFinal);
3898 
3899   // Get receiver klass into rdx - also a null check
3900   __ restore_locals();  // restore r14
3901   __ load_klass(rdx, rcx, rscratch1);
3902 
3903   Label no_such_method;
3904 
3905   // Preserve method for throw_AbstractMethodErrorVerbose.
3906   __ mov(rcx, rbx);
3907   // Receiver subtype check against REFC.
3908   // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
3909   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3910                              rdx, rax, noreg,
3911                              // outputs: scan temp. reg, scan temp. reg
3912                              rbcp, rlocals,
3913                              no_such_interface,
3914                              /*return_method=*/false);
3915 
3916   // profile this call
3917   __ restore_bcp(); // rbcp was destroyed by receiver type check
3918   __ profile_virtual_call(rdx, rbcp, rlocals);
3919 
3920   // Get declaring interface class from method, and itable index
3921   __ load_method_holder(rax, rbx);
3922   __ movl(rbx, Address(rbx, Method::itable_index_offset()));
3923   __ subl(rbx, Method::itable_index_max);
3924   __ negl(rbx);
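       // The itable index is stored encoded as (itable_index_max - index);
       // the subl/negl pair computes rbx = itable_index_max - rbx, which
       // recovers the original index.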
3925 
3926   // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3927   __ mov(rlocals, rdx);
3928   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3929                              rlocals, rax, rbx,
3930                              // outputs: method, scan temp. reg
3931                              rbx, rbcp,
3932                              no_such_interface);
3933 
3934   // rbx: Method* to call
3935   // rcx: receiver
3936   // Check for abstract method error
3937   // Note: This should be done more efficiently via a throw_abstract_method_error
3938   //       interpreter entry point and a conditional jump to it in case of a null
3939   //       method.
3940   __ testptr(rbx, rbx);
3941   __ jcc(Assembler::zero, no_such_method);
3942 
3943   __ profile_arguments_type(rdx, rbx, rbcp, true);
3944 
3945   // do the call
3946   // rcx: receiver
3947   // rbx,: Method*
3948   __ jump_from_interpreted(rbx, rdx);
3949   __ should_not_reach_here();
3950 
3951   // exception handling code follows...
3952   // note: must restore interpreter registers to canonical
3953   //       state for exception handling to work correctly!
3954 
3955   __ bind(no_such_method);
3956   // throw exception
3957   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
3958   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
3959   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3960   // Pass arguments for generating a verbose error message.
3961 #ifdef _LP64
3962   recvKlass = c_rarg1;
3963   Register method    = c_rarg2;
3964   if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
3965   if (method != rcx)    { __ movq(method, rcx);    }
3966 #else
3967   recvKlass = rdx;
3968   Register method    = rcx;
3969 #endif
3970   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
3971              recvKlass, method);
3972   // The call_VM checks for exception, so we should never return here.
3973   __ should_not_reach_here();
3974 
3975   __ bind(no_such_interface);
3976   // throw exception
3977   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
3978   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
3979   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3980   // Pass arguments for generating a verbose error message.
3981   LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
3982   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
3983              recvKlass, rax);
3984   // the call_VM checks for exception, so we should never return here.
3985   __ should_not_reach_here();
3986 }
3987 
3988 void TemplateTable::invokehandle(int byte_no) {
3989   transition(vtos, vtos);
3990   assert(byte_no == f1_byte, "use this argument");
3991   const Register rbx_method = rbx;
3992   const Register rax_mtype  = rax;
3993   const Register rcx_recv   = rcx;
3994   const Register rdx_flags  = rdx;
3995 
3996   load_resolved_method_entry_handle(rcx, rbx_method, rax_mtype, rdx_flags);
3997   prepare_invoke(rcx, rcx_recv, rdx_flags);
3998 
3999   __ verify_method_ptr(rbx_method);
4000   __ verify_oop(rcx_recv);
4001   __ null_check(rcx_recv);
4002 
4003   // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
4004   // rbx: MH.invokeExact_MT method
4005 
4006   // Note:  rax_mtype is already pushed (if necessary)
4007 
4008   // FIXME: profile the LambdaForm also
4009   __ profile_final_call(rax);
4010   __ profile_arguments_type(rdx, rbx_method, rbcp, true);
4011 
4012   __ jump_from_interpreted(rbx_method, rdx);
4013 }
4014 
4015 void TemplateTable::invokedynamic(int byte_no) {
4016   transition(vtos, vtos);
4017   assert(byte_no == f1_byte, "use this argument");
4018 
4019   const Register rbx_method   = rbx;
4020   const Register rax_callsite = rax;
4021 
4022   load_invokedynamic_entry(rbx_method);
4023   // rax: CallSite object (from cpool->resolved_references[])
4024   // rbx: MH.linkToCallSite method
4025 
4026   // Note:  rax_callsite is already pushed
4027 
4028   // %%% should make a type profile for any invokedynamic that takes a ref argument
4029   // profile this call
4030   __ profile_call(rbcp);
4031   __ profile_arguments_type(rdx, rbx_method, rbcp, false);
4032 
4033   __ verify_oop(rax_callsite);
4034 
4035   __ jump_from_interpreted(rbx_method, rdx);
4036 }
4037 
4038 //-----------------------------------------------------------------------------
4039 // Allocation
4040 
4041 void TemplateTable::_new() {
4042   transition(vtos, atos);
4043   __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
4044   Label slow_case;
4045   Label slow_case_no_pop;
4046   Label done;
4047   Label initialize_header;
4048 
4049   __ get_cpool_and_tags(rcx, rax);
4050 
4051   // Make sure the class we're about to instantiate has been resolved.
4052   // This is done before loading InstanceKlass to be consistent with the order
4053   // how Constant Pool is updated (see ConstantPool::klass_at_put)
4054   const int tags_offset = Array<u1>::base_offset_in_bytes();
4055   __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
4056   __ jcc(Assembler::notEqual, slow_case_no_pop);
4057 
4058   // get InstanceKlass
4059   __ load_resolved_klass_at_index(rcx, rcx, rdx);
4060   __ push(rcx);  // save the klass for initializing the header
4061 
4062   // make sure klass is initialized
4063   // init_state needs acquire, but x86 is TSO, and so we are already good.
4064 #ifdef _LP64
4065   assert(VM_Version::supports_fast_class_init_checks(), "must support fast class initialization checks");
4066   __ clinit_barrier(rcx, r15_thread, nullptr /*L_fast_path*/, &slow_case);
4067 #else
4068   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
4069   __ jcc(Assembler::notEqual, slow_case);
4070 #endif
4071 
4072   // get instance_size in InstanceKlass (scaled to a count of bytes)
4073   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
4074   // test to see if it is malformed in some way
4075   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
4076   __ jcc(Assembler::notZero, slow_case);
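       // For instance klasses the layout helper is the instance size in bytes,
       // with the low bit (_lh_instance_slow_path_bit) set when allocation must
       // take the slow path (e.g. the class has a finalizer).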
4077 
4078   // Allocate the instance:
4079   //  If TLAB is enabled:
4080   //    Try to allocate in the TLAB.
4081   //    If fails, go to the slow path.
4082   //    Initialize the allocation.
4083   //    Exit.
4084   //
4085   //  Go to slow path.
4086 
4087   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
4088 
4089   if (UseTLAB) {
4090     NOT_LP64(__ get_thread(thread);)
4091     __ tlab_allocate(thread, rax, rdx, 0, rcx, rbx, slow_case);
4092     if (ZeroTLAB) {
4093       // the fields have been already cleared
4094       __ jmp(initialize_header);
4095     }
4096 
4097     // The object is initialized before the header.  If the object size is
4098     // zero, go directly to the header initialization.
4099     if (UseCompactObjectHeaders) {
4100       assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
4101       __ decrement(rdx, oopDesc::base_offset_in_bytes());
4102     } else {
4103       __ decrement(rdx, sizeof(oopDesc));
4104     }
4105     __ jcc(Assembler::zero, initialize_header);
4106 
4107     // Initialize topmost object field, divide rdx by 8, check if odd and
4108     // test if zero.
4109     __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
4110     __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
4111 
4113 #ifdef ASSERT
4114     // make sure rdx was a multiple of 8
4115     Label L;
4116     // Ignore partial flag stall after shrl() since this is a debug VM
4117     __ jcc(Assembler::carryClear, L);
4118     __ stop("object size is not multiple of 2 - adjust this code");
4119     __ bind(L);
4120     // rdx must be > 0, no extra check needed here
4121 #endif
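         // Worked example: a 32-byte instance with a 16-byte header leaves
         // rdx == 16 after the decrement above; shrl by LogBytesPerLong yields 2,
         // so the loop below performs two 8-byte clearing stores.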
4122 
4123     // initialize remaining object fields: rdx was a multiple of 8
4124     { Label loop;
4125     __ bind(loop);
4126     int header_size_bytes = oopDesc::header_size() * HeapWordSize;
4127     assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
4128     __ movptr(Address(rax, rdx, Address::times_8, header_size_bytes - 1*oopSize), rcx);
4129     NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, header_size_bytes - 2*oopSize), rcx));
4130     __ decrement(rdx);
4131     __ jcc(Assembler::notZero, loop);
4132     }
4133 
4134     // initialize object header only.
4135     __ bind(initialize_header);
4136     if (UseCompactObjectHeaders) {
4137       __ pop(rcx);   // get saved klass back in the register.
4138       __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
4139       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rbx);
4140     } else {
4141       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
4142                 (intptr_t)markWord::prototype().value()); // header
4143       __ pop(rcx);   // get saved klass back in the register.
4144 #ifdef _LP64
4145       __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
4146       __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
4147 #endif
4148       __ store_klass(rax, rcx, rscratch1);  // klass
4149     }
4150 
4151     if (DTraceAllocProbes) {
4152       // Trigger dtrace event for fastpath
4153       __ push(atos);
4154       __ call_VM_leaf(
4155            CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), rax);
4156       __ pop(atos);
4157     }
4158 
4159     __ jmp(done);
4160   }
4161 
4162   // slow case
4163   __ bind(slow_case);
4164   __ pop(rcx);   // restore stack pointer to what it was when we came in.
4165   __ bind(slow_case_no_pop);
4166 
4167   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4168   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4169 
4170   __ get_constant_pool(rarg1);
4171   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4172   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
4173   __ verify_oop(rax);
4174 
4175   // continue
4176   __ bind(done);
4177 }
4178 
4179 void TemplateTable::newarray() {
4180   transition(itos, atos);
4181   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4182   __ load_unsigned_byte(rarg1, at_bcp(1));
4183   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
4184           rarg1, rax);
4185 }
4186 
4187 void TemplateTable::anewarray() {
4188   transition(itos, atos);
4189 
4190   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4191   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4192 
4193   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4194   __ get_constant_pool(rarg1);
4195   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
4196           rarg1, rarg2, rax);
4197 }
4198 
4199 void TemplateTable::arraylength() {
4200   transition(atos, itos);
4201   __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
4202 }
4203 
4204 void TemplateTable::checkcast() {
4205   transition(atos, atos);
4206   Label done, is_null, ok_is_subtype, quicked, resolved;
4207   __ testptr(rax, rax); // object is in rax
4208   __ jcc(Assembler::zero, is_null);
4209 
4210   // Get cpool & tags index
4211   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4212   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4213   // See if bytecode has already been quicked
4214   __ cmpb(Address(rdx, rbx,
4215                   Address::times_1,
4216                   Array<u1>::base_offset_in_bytes()),
4217           JVM_CONSTANT_Class);
4218   __ jcc(Assembler::equal, quicked);
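       // "Quicked" means the constant-pool entry was already resolved to a
       // Class, so the Klass* can be loaded directly below without calling
       // into the VM.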
4219   __ push(atos); // save receiver for result, and for GC
4220   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4221 
4222   // vm_result_2 has metadata result
4223 #ifndef _LP64
4224   // borrow rdi from locals
4225   __ get_thread(rdi);
4226   __ get_vm_result_2(rax, rdi);
4227   __ restore_locals();
4228 #else
4229   __ get_vm_result_2(rax, r15_thread);
4230 #endif
4231 
4232   __ pop_ptr(rdx); // restore receiver
4233   __ jmpb(resolved);
4234 
4235   // Get superklass in rax and subklass in rbx
4236   __ bind(quicked);
4237   __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
4238   __ load_resolved_klass_at_index(rax, rcx, rbx);
4239 
4240   __ bind(resolved);
4241   __ load_klass(rbx, rdx, rscratch1);
4242 
4243   // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
4244   // Superklass in rax.  Subklass in rbx.
4245   __ gen_subtype_check(rbx, ok_is_subtype);
4246 
4247   // Come here on failure
4248   __ push_ptr(rdx);
4249   // object is at TOS
4250   __ jump(RuntimeAddress(Interpreter::_throw_ClassCastException_entry));
4251 
4252   // Come here on success
4253   __ bind(ok_is_subtype);
4254   __ mov(rax, rdx); // Restore object in rdx
4255 
4256   // Collect counts on whether this check-cast sees nulls a lot or not.
4257   if (ProfileInterpreter) {
4258     __ jmp(done);
4259     __ bind(is_null);
4260     __ profile_null_seen(rcx);
4261   } else {
4262     __ bind(is_null);   // same as 'done'
4263   }
4264   __ bind(done);
4265 }
4266 
4267 void TemplateTable::instanceof() {
4268   transition(atos, itos);
4269   Label done, is_null, ok_is_subtype, quicked, resolved;
4270   __ testptr(rax, rax);
4271   __ jcc(Assembler::zero, is_null);
4272 
4273   // Get cpool & tags index
4274   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4275   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4276   // See if bytecode has already been quicked
4277   __ cmpb(Address(rdx, rbx,
4278                   Address::times_1,
4279                   Array<u1>::base_offset_in_bytes()),
4280           JVM_CONSTANT_Class);
4281   __ jcc(Assembler::equal, quicked);
4282 
4283   __ push(atos); // save receiver for result, and for GC
4284   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4285   // vm_result_2 has metadata result
4286 
4287 #ifndef _LP64
4288   // borrow rdi from locals
4289   __ get_thread(rdi);
4290   __ get_vm_result_2(rax, rdi);
4291   __ restore_locals();
4292 #else
4293   __ get_vm_result_2(rax, r15_thread);
4294 #endif
4295 
4296   __ pop_ptr(rdx); // restore receiver
4297   __ verify_oop(rdx);
4298   __ load_klass(rdx, rdx, rscratch1);
4299   __ jmpb(resolved);
4300 
4301   // Get superklass in rax and subklass in rdx
4302   __ bind(quicked);
4303   __ load_klass(rdx, rax, rscratch1);
4304   __ load_resolved_klass_at_index(rax, rcx, rbx);
4305 
4306   __ bind(resolved);
4307 
4308   // Generate subtype check.  Blows rcx, rdi
4309   // Superklass in rax.  Subklass in rdx.
4310   __ gen_subtype_check(rdx, ok_is_subtype);
4311 
4312   // Come here on failure
4313   __ xorl(rax, rax);
4314   __ jmpb(done);
4315   // Come here on success
4316   __ bind(ok_is_subtype);
4317   __ movl(rax, 1);
4318 
4319   // Collect counts on whether this test sees nulls a lot or not.
4320   if (ProfileInterpreter) {
4321     __ jmp(done);
4322     __ bind(is_null);
4323     __ profile_null_seen(rcx);
4324   } else {
4325     __ bind(is_null);   // same as 'done'
4326   }
4327   __ bind(done);
4328   // rax = 0: obj == nullptr or  obj is not an instanceof the specified klass
4329   // rax = 1: obj != nullptr and obj is     an instanceof the specified klass
4330 }
4331 
4332 
4333 //----------------------------------------------------------------------------------------------------
4334 // Breakpoints
4335 void TemplateTable::_breakpoint() {
4336   // Note: We get here even if we are single stepping.
4337   // jbug insists on setting breakpoints at every bytecode
4338   // even if we are in single step mode.
4339 
4340   transition(vtos, vtos);
4341 
4342   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4343 
4344   // get the unpatched byte code
4345   __ get_method(rarg);
4346   __ call_VM(noreg,
4347              CAST_FROM_FN_PTR(address,
4348                               InterpreterRuntime::get_original_bytecode_at),
4349              rarg, rbcp);
4350   __ mov(rbx, rax);  // why?
4351 
4352   // post the breakpoint event
4353   __ get_method(rarg);
4354   __ call_VM(noreg,
4355              CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4356              rarg, rbcp);
4357 
4358   // complete the execution of original bytecode
4359   __ dispatch_only_normal(vtos);
4360 }
4361 
4362 //-----------------------------------------------------------------------------
4363 // Exceptions
4364 
4365 void TemplateTable::athrow() {
4366   transition(atos, vtos);
4367   __ null_check(rax);
4368   __ jump(RuntimeAddress(Interpreter::throw_exception_entry()));
4369 }
4370 
4371 //-----------------------------------------------------------------------------
4372 // Synchronization
4373 //
4374 // Note: monitorenter & exit are symmetric routines; which is reflected
4375 //       in the assembly code structure as well
4376 //
4377 // Stack layout:
4378 //
4379 // [expressions  ] <--- rsp               = expression stack top
4380 // ..
4381 // [expressions  ]
4382 // [monitor entry] <--- monitor block top = expression stack bot
4383 // ..
4384 // [monitor entry]
4385 // [frame data   ] <--- monitor block bot
4386 // ...
4387 // [saved rbp    ] <--- rbp
4388 void TemplateTable::monitorenter() {
4389   transition(atos, vtos);
4390 
4391   // check for null object
4392   __ null_check(rax);
4393 
4394   const Address monitor_block_top(
4395         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4396   const Address monitor_block_bot(
4397         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4398   const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
4399 
4400   Label allocated;
4401 
4402   Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
4403   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4404   Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4405 
4406   // initialize entry pointer
4407   __ xorl(rmon, rmon); // points to free slot or null
4408 
4409   // find a free slot in the monitor block (result in rmon)
4410   {
4411     Label entry, loop, exit;
4412     __ movptr(rtop, monitor_block_top); // derelativize pointer
4413     __ lea(rtop, Address(rbp, rtop, Address::times_ptr));
4414     // rtop points to current entry, starting with top-most entry
4415 
4416     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4417                                         // of monitor block
4418     __ jmpb(entry);
4419 
4420     __ bind(loop);
4421     // check if current entry is used
4422     __ cmpptr(Address(rtop, BasicObjectLock::obj_offset()), NULL_WORD);
4423     // if not used then remember entry in rmon
4424     __ cmovptr(Assembler::equal, rmon, rtop);
4425     // check if current entry is for same object
4426     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset()));
4427     // if same object then stop searching
4428     __ jccb(Assembler::equal, exit);
4429     // otherwise advance to next entry
4430     __ addptr(rtop, entry_size);
4431     __ bind(entry);
4432     // check if bottom reached
4433     __ cmpptr(rtop, rbot);
4434     // if not at bottom then check this entry
4435     __ jcc(Assembler::notEqual, loop);
4436     __ bind(exit);
4437   }
4438 
4439   __ testptr(rmon, rmon); // check if a slot has been found
4440   __ jcc(Assembler::notZero, allocated); // if found, continue with that one
4441 
4442   // allocate one if there's no free slot
4443   {
4444     Label entry, loop;
4445     // 1. compute new pointers          // rsp: old expression stack top
4446     __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
4447     __ lea(rmon, Address(rbp, rmon, Address::times_ptr));
4448     __ subptr(rsp, entry_size);         // move expression stack top
4449     __ subptr(rmon, entry_size);        // move expression stack bottom
4450     __ mov(rtop, rsp);                  // set start value for copy loop
4451     __ subptr(monitor_block_bot, entry_size / wordSize); // set new monitor block bottom
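         // monitor_block_bot holds a relativized pointer (a word offset from
         // rbp), so growing the monitor area by one entry subtracts the entry
         // size in words rather than in bytes.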
4452     __ jmp(entry);
4453     // 2. move expression stack contents
4454     __ bind(loop);
4455     __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
4456                                                 // word from old location
4457     __ movptr(Address(rtop, 0), rbot);          // and store it at new location
4458     __ addptr(rtop, wordSize);                  // advance to next word
4459     __ bind(entry);
4460     __ cmpptr(rtop, rmon);                      // check if bottom reached
4461     __ jcc(Assembler::notEqual, loop);          // if not at bottom then
4462                                                 // copy next word
4463   }
4464 
4465   // call run-time routine
4466   // rmon: points to monitor entry
4467   __ bind(allocated);
4468 
4469   // Increment bcp to point to the next bytecode, so exception
4470   // handling for async. exceptions work correctly.
4471   // The object has already been popped from the stack, so the
4472   // expression stack looks correct.
4473   __ increment(rbcp);
4474 
4475   // store object
4476   __ movptr(Address(rmon, BasicObjectLock::obj_offset()), rax);
4477   __ lock_object(rmon);
4478 
4479   // check to make sure this monitor doesn't cause stack overflow after locking
4480   __ save_bcp();  // in case of exception
4481   __ generate_stack_overflow_check(0);
4482 
4483   // The bcp has already been incremented. Just need to dispatch to
4484   // next instruction.
4485   __ dispatch_next(vtos);
4486 }
4487 
4488 void TemplateTable::monitorexit() {
4489   transition(atos, vtos);
4490 
4491   // check for null object
4492   __ null_check(rax);
4493 
4494   const Address monitor_block_top(
4495         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4496   const Address monitor_block_bot(
4497         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4498   const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
4499 
4500   Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4501   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4502 
4503   Label found;
4504 
4505   // find matching slot
4506   {
4507     Label entry, loop;
4508     __ movptr(rtop, monitor_block_top); // derelativize pointer
4509     __ lea(rtop, Address(rbp, rtop, Address::times_ptr));
4510     // rtop points to current entry, starting with top-most entry
4511 
4512     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4513                                         // of monitor block
4514     __ jmpb(entry);
4515 
4516     __ bind(loop);
4517     // check if current entry is for same object
4518     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset()));
4519     // if same object then stop searching
4520     __ jcc(Assembler::equal, found);
4521     // otherwise advance to next entry
4522     __ addptr(rtop, entry_size);
4523     __ bind(entry);
4524     // check if bottom reached
4525     __ cmpptr(rtop, rbot);
4526     // if not at bottom then check this entry
4527     __ jcc(Assembler::notEqual, loop);
4528   }
4529 
4530   // error handling. Unlocking was not block-structured
4531   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4532                    InterpreterRuntime::throw_illegal_monitor_state_exception));
4533   __ should_not_reach_here();
4534 
4535   // call run-time routine
4536   __ bind(found);
4537   __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
4538   __ unlock_object(rtop);
4539   __ pop_ptr(rax); // discard object
4540 }
4541 
4542 // Wide instructions
4543 void TemplateTable::wide() {
4544   transition(vtos, vtos);
4545   __ load_unsigned_byte(rbx, at_bcp(1));
4546   ExternalAddress wtable((address)Interpreter::_wentry_point);
4547   __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)), rscratch1);
4548   // Note: the rbcp increment step is part of the individual wide bytecode implementations
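       // For example, "wide iinc" lands here with rbx == _iinc; the wide entry
       // then reads a two-byte local index and a two-byte increment instead of
       // the narrow form's one-byte operands.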
4549 }
4550 
4551 // Multi arrays
4552 void TemplateTable::multianewarray() {
4553   transition(vtos, atos);
4554 
4555   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4556   __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
4557   // last dim is on top of stack; we want address of first one:
4558   // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordSize;
4559   // the latter wordSize is to point to the beginning of the array.
4560   __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
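       // For example, for "new int[2][3][4]" the counts 2, 3, 4 are pushed in
       // that order (4 on top), ndims == 3, and rarg ends up pointing at the
       // slot holding the first-pushed count.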
4561   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
4562   __ load_unsigned_byte(rbx, at_bcp(3));
4563   __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
4564 }