src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp

  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "classfile/javaClasses.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interpreter/bytecodeHistogram.hpp"
  32 #include "interpreter/interp_masm.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "interpreter/interpreterRuntime.hpp"
  35 #include "interpreter/templateInterpreterGenerator.hpp"
  36 #include "interpreter/templateTable.hpp"
  37 #include "oops/arrayOop.hpp"
  38 #include "oops/methodData.hpp"
  39 #include "oops/method.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "prims/jvmtiExport.hpp"
  42 #include "prims/jvmtiThreadState.hpp"

  43 #include "runtime/deoptimization.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/jniHandles.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "runtime/stubRoutines.hpp"
  48 #include "runtime/synchronizer.hpp"
  49 #include "runtime/timer.hpp"
  50 #include "runtime/vframeArray.hpp"
  51 #include "utilities/debug.hpp"
  52 #include "utilities/macros.hpp"
  53 
  54 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
  55 
  56 // Size of interpreter code.  Increase if too small.  Interpreter will
  57 // fail with a guarantee ("not enough space for interpreter generation")
  58 // if too small.
  59 // Run with +PrintInterpreter to get the VM to print out the size.
  60 // Max size with JVMTI
  61 #ifdef AMD64
  62 int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;

 348   case T_FLOAT  : /* nothing to do */        break;
 349   case T_DOUBLE : /* nothing to do */        break;
 350 #endif // _LP64
 351 
 352   case T_OBJECT :
 353     // retrieve result from frame
 354     __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
 355     // and verify it
 356     __ verify_oop(rax);
 357     break;
 358   default       : ShouldNotReachHere();
 359   }
 360   __ ret(0);                                   // return from result handler
 361   return entry;
 362 }
 363 
 364 address TemplateInterpreterGenerator::generate_safept_entry_for(
 365         TosState state,
 366         address runtime_entry) {
 367   address entry = __ pc();



 368   __ push(state);


 369   __ call_VM(noreg, runtime_entry);



 370   __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
 371   return entry;
 372 }
 373 
 374 
 375 
 376 // Helpers for commoning out cases in the various types of method entries.
 377 //
 378 
 379 
 380 // increment invocation count & check for overflow
 381 //
 382 // Note: checking for negative value instead of overflow
 383 //       so we have a 'sticky' overflow test
 384 //
 385 // rbx: method
 386 // rcx: invocation counter
 387 //
 388 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
 389   Label done;

 584     {
 585       Label L;
 586       __ testptr(rax, rax);
 587       __ jcc(Assembler::notZero, L);
 588       __ stop("synchronization object is NULL");
 589       __ bind(L);
 590     }
 591 #endif // ASSERT
 592 
 593     __ bind(done);
 594   }
 595 
 596   // add space for monitor & lock
 597   __ subptr(rsp, entry_size); // add space for a monitor entry
 598   __ movptr(monitor_block_top, rsp);  // set new monitor block top
 599   // store object
 600   __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
 601   const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
 602   __ movptr(lockreg, rsp); // object address
 603   __ lock_object(lockreg);




 604 }
 605 
 606 // Generate a fixed interpreter frame. This is identical setup for
 607 // interpreted methods and for native methods hence the shared code.
 608 //
 609 // Args:
 610 //      rax: return address
 611 //      rbx: Method*
 612 //      r14/rdi: pointer to locals
 613 //      r13/rsi: sender sp
 614 //      rdx: cp cache
 615 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
 616   // initialize fixed part of activation frame
 617   __ push(rax);        // save return address
 618   __ enter();          // save old & set new rbp
 619   __ push(rbcp);        // set sender sp
 620   __ push((int)NULL_WORD); // leave last_sp as null
 621   __ movptr(rbcp, Address(rbx, Method::const_offset()));      // get ConstMethod*
 622   __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
 623   __ push(rbx);        // save Method*

 635   } else {
 636     __ push(0);
 637   }
 638 
 639   __ movptr(rdx, Address(rbx, Method::const_offset()));
 640   __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
 641   __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
 642   __ push(rdx); // set constant pool cache
 643   __ push(rlocals); // set locals pointer
 644   if (native_call) {
 645     __ push(0); // no bcp
 646   } else {
 647     __ push(rbcp); // set bcp
 648   }
 649   __ push(0); // reserve word for pointer to expression stack bottom
 650   __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
 651 }
 652 
 653 // End of helpers
 654 





















 655 // Method entry for java.lang.ref.Reference.get.
 656 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
 657   // Code: _aload_0, _getfield, _areturn
 658   // parameter size = 1
 659   //
 660   // The code that gets generated by this routine is split into 2 parts:
 661   //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
 662   //    2. The slow path - which is an expansion of the regular method entry.
 663   //
 664   // Notes:
 665   // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
 666   // * We may jump to the slow path iff the receiver is null. If the
 667   //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
 668   //   Thus we can use the regular method entry code to generate the NPE.
 669   //
 670   // rbx: Method*
 671 
 672   // r13: senderSP must be preserved for slow path, set SP to it on fast path
 673 
 674   address entry = __ pc();

1192                             (intptr_t)(frame::interpreter_frame_initial_sp_offset *
1193                                        wordSize - (int)sizeof(BasicObjectLock)));
1194 
1195       const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
1196 
1197       // monitor expected in c_rarg1 for slow unlock path
1198       __ lea(regmon, monitor); // address of first monitor
1199 
1200       __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
1201       __ testptr(t, t);
1202       __ jcc(Assembler::notZero, unlock);
1203 
1204       // Entry already unlocked, need to throw exception
1205       __ MacroAssembler::call_VM(noreg,
1206                                  CAST_FROM_FN_PTR(address,
1207                    InterpreterRuntime::throw_illegal_monitor_state_exception));
1208       __ should_not_reach_here();
1209 
1210       __ bind(unlock);
1211       __ unlock_object(regmon);


1212     }
1213     __ bind(L);
1214   }
1215 
1216   // jvmti support
1217   // Note: This must happen _after_ handling/throwing any exceptions since
1218   //       the exception handler code notifies the runtime of method exits
1219   //       too. If this happens before, method entry/exit notifications are
1220   //       not properly paired (was bug - gri 11/22/99).
1221   __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1222 
1223   // restore potential result in edx:eax, call result handler to
1224   // restore potential result in ST0 & handle result
1225 
1226   __ pop(ltos);
1227   LP64_ONLY( __ pop(dtos));
1228 
1229   __ movptr(t, Address(rbp,
1230                        (frame::interpreter_frame_result_handler_offset) * wordSize));
1231   __ call(t);

1260   __ empty_expression_stack();
1261   __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
1262   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
1263 
1264   // throw exception
1265   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
1266   // the call_VM checks for exception, so we should never return here.
1267   __ should_not_reach_here();
1268 
1269   return entry_point;
1270 }
1271 
1272 //
1273 // Generic interpreted method entry to (asm) interpreter
1274 //
1275 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1276   // determine code generation flags
1277   bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1278 
1279   // ebx: Method*
1280   // rbcp: sender sp
1281   address entry_point = __ pc();
1282 
1283   const Address constMethod(rbx, Method::const_offset());
1284   const Address access_flags(rbx, Method::access_flags_offset());
1285   const Address size_of_parameters(rdx,
1286                                    ConstMethod::size_of_parameters_offset());
1287   const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
1288 
1289 
1290   // get parameter size (always needed)
1291   __ movptr(rdx, constMethod);
1292   __ load_unsigned_short(rcx, size_of_parameters);
1293 
1294   // rbx: Method*
1295   // rcx: size of parameters
1296   // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )
1297 
1298   __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
1299   __ subl(rdx, rcx); // rdx = no. of additional locals
1300 

  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "classfile/javaClasses.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interpreter/bytecodeHistogram.hpp"
  32 #include "interpreter/interp_masm.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "interpreter/interpreterRuntime.hpp"
  35 #include "interpreter/templateInterpreterGenerator.hpp"
  36 #include "interpreter/templateTable.hpp"
  37 #include "oops/arrayOop.hpp"
  38 #include "oops/methodData.hpp"
  39 #include "oops/method.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "prims/jvmtiExport.hpp"
  42 #include "prims/jvmtiThreadState.hpp"
  43 #include "runtime/continuation.hpp"
  44 #include "runtime/deoptimization.hpp"
  45 #include "runtime/frame.inline.hpp"
  46 #include "runtime/jniHandles.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "runtime/stubRoutines.hpp"
  49 #include "runtime/synchronizer.hpp"
  50 #include "runtime/timer.hpp"
  51 #include "runtime/vframeArray.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
  56 
  57 // Size of interpreter code.  Increase if too small.  Interpreter will
  58 // fail with a guarantee ("not enough space for interpreter generation")
  59 // if too small.
  60 // Run with +PrintInterpreter to get the VM to print out the size.
  61 // Max size with JVMTI
  62 #ifdef AMD64
  63 int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
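The initializer above yields 268 KB when JVMCI is built in and 256 KB otherwise. A minimal stand-alone C++ sketch of how the JVMCI_ONLY/NOT_JVMCI macro pair produces that effect (the macro definitions below are simplified stand-ins for illustration only; the real ones live in utilities/macros.hpp and key off INCLUDE_JVMCI):

#include <cstdio>

#if INCLUDE_JVMCI
  #define JVMCI_ONLY(code) code
  #define NOT_JVMCI(code)
#else
  #define JVMCI_ONLY(code)
  #define NOT_JVMCI(code) code
#endif

// Same expression shape as the initializer above: exactly one operand survives.
static const int interpreter_code_size_sketch = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;

int main() {
  std::printf("interpreter code size: %d KB\n", interpreter_code_size_sketch / 1024);
  return 0;
}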

 349   case T_FLOAT  : /* nothing to do */        break;
 350   case T_DOUBLE : /* nothing to do */        break;
 351 #endif // _LP64
 352 
 353   case T_OBJECT :
 354     // retrieve result from frame
 355     __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
 356     // and verify it
 357     __ verify_oop(rax);
 358     break;
 359   default       : ShouldNotReachHere();
 360   }
 361   __ ret(0);                                   // return from result handler
 362   return entry;
 363 }
 364 
 365 address TemplateInterpreterGenerator::generate_safept_entry_for(
 366         TosState state,
 367         address runtime_entry) {
 368   address entry = __ pc();
 369
 370   const Register rthread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
 371 
 372   __ push(state);
 373   NOT_LP64(__ get_thread(rthread);)
 374   __ push_cont_fastpath(rthread);
 375   __ call_VM(noreg, runtime_entry);
 376   NOT_LP64(__ get_thread(rthread);)
 377   __ pop_cont_fastpath(rthread);
 378 
 379   __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
 380   return entry;
 381 }
 382 
 383 
 384 
 385 // Helpers for commoning out cases in the various types of method entries.
 386 //
 387 
 388 
 389 // increment invocation count & check for overflow
 390 //
 391 // Note: checking for negative value instead of overflow
 392 //       so we have a 'sticky' overflow test
 393 //
 394 // rbx: method
 395 // rcx: invocation counter
 396 //
 397 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
 398   Label done;
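The comment above describes the 'sticky' overflow test: the generated code checks the counter's sign rather than catching the one increment that overflows, so once the counter crosses into the "negative" range every subsequent invocation also takes the overflow path until the runtime resets the counter. A minimal stand-alone C++ sketch of that idea, assuming a plain 32-bit counter (the real InvocationCounter packs state bits and uses its own limits):

#include <cstdio>
#include <cstdint>

int main() {
  uint32_t counter = 0x7ffffff0u;                      // hypothetical counter just below the sign bit
  for (int call = 0; call < 4; call++) {
    counter += 8;                                      // one (modelled) invocation increment
    bool overflowed = (counter & 0x80000000u) != 0;    // sign-bit check: stays set once tripped
    std::printf("call %d: counter=0x%08x overflow=%d\n",
                call, (unsigned)counter, (int)overflowed);
  }
  return 0;
}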

 593     {
 594       Label L;
 595       __ testptr(rax, rax);
 596       __ jcc(Assembler::notZero, L);
 597       __ stop("synchronization object is NULL");
 598       __ bind(L);
 599     }
 600 #endif // ASSERT
 601 
 602     __ bind(done);
 603   }
 604 
 605   // add space for monitor & lock
 606   __ subptr(rsp, entry_size); // add space for a monitor entry
 607   __ movptr(monitor_block_top, rsp);  // set new monitor block top
 608   // store object
 609   __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
 610   const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
 611   __ movptr(lockreg, rsp); // object address
 612   __ lock_object(lockreg);
 613 
 614   Register rthread = NOT_LP64(rax) LP64_ONLY(r15_thread);
 615   NOT_LP64(__ get_thread(rthread);)
 616   __ inc_held_monitor_count(rthread);
 617 }
 618 
 619 // Generate a fixed interpreter frame. This is identical setup for
 620 // interpreted methods and for native methods, hence the shared code.
 621 //
 622 // Args:
 623 //      rax: return address
 624 //      rbx: Method*
 625 //      r14/rdi: pointer to locals
 626 //      r13/rsi: sender sp
 627 //      rdx: cp cache
 628 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
 629   // initialize fixed part of activation frame
 630   __ push(rax);        // save return address
 631   __ enter();          // save old & set new rbp
 632   __ push(rbcp);        // set sender sp
 633   __ push((int)NULL_WORD); // leave last_sp as null
 634   __ movptr(rbcp, Address(rbx, Method::const_offset()));      // get ConstMethod*
 635   __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
 636   __ push(rbx);        // save Method*

 648   } else {
 649     __ push(0);
 650   }
 651 
 652   __ movptr(rdx, Address(rbx, Method::const_offset()));
 653   __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
 654   __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
 655   __ push(rdx); // set constant pool cache
 656   __ push(rlocals); // set locals pointer
 657   if (native_call) {
 658     __ push(0); // no bcp
 659   } else {
 660     __ push(rbcp); // set bcp
 661   }
 662   __ push(0); // reserve word for pointer to expression stack bottom
 663   __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
 664 }
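The last two pushes in generate_fixed_frame form a small idiom: push(0) reserves one stack word, and the following movptr stores rsp into that word, so the slot ends up holding its own address, which is what later code treats as the expression stack bottom. A minimal stand-alone C++ sketch of the idiom, using an array in place of the machine stack (names are illustrative only):

#include <cstdio>
#include <cstdint>

int main() {
  uintptr_t frame[8];                 // simulated stack words
  uintptr_t* sp = frame + 8;          // simulated rsp; the stack grows toward lower addresses
  *--sp = 0;                          // push(0): reserve the word
  *sp = (uintptr_t)sp;                // movptr(Address(rsp, 0), rsp): slot now points at itself
  std::printf("stack-bottom slot %p holds %p\n", (void*)sp, (void*)*sp);
  return 0;
}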
 665 
 666 // End of helpers
 667 
 668 address TemplateInterpreterGenerator::generate_Continuation_doYield_entry(void) {
 669 #ifdef _LP64
 670   address entry = __ pc();
 671   assert(StubRoutines::cont_doYield() != NULL, "stub not yet generated");
 672 
 673   // __ movl(c_rarg1, Address(rsp, wordSize)); // scopes
 674   const Register thread1 = NOT_LP64(rdi) LP64_ONLY(r15_thread);
 675   NOT_LP64(__ get_thread(thread1));
 676   __ push_cont_fastpath(thread1);
 677
 678   __ jump(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::cont_doYield())));
 679   // return value is in rax
 680 
 681   return entry;
 682 #else
 683   // Not implemented. Allow startup of legacy Java code that does not touch
 684   // Continuation.doYield yet. Throw AbstractMethodError on access.
 685   return generate_abstract_entry();
 686 #endif
 687 }
 688 
 689 // Method entry for java.lang.ref.Reference.get.
 690 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
 691   // Code: _aload_0, _getfield, _areturn
 692   // parameter size = 1
 693   //
 694   // The code that gets generated by this routine is split into 2 parts:
 695   //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
 696   //    2. The slow path - which is an expansion of the regular method entry.
 697   //
 698   // Notes:
 699   // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
 700   // * We may jump to the slow path iff the receiver is null. If the
 701   //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
 702   //   Thus we can use the regular method entry code to generate the NPE.
 703   //
 704   // rbx: Method*
 705 
 706   // r13: senderSP must be preserved for slow path, set SP to it on fast path
 707 
 708   address entry = __ pc();

1226                             (intptr_t)(frame::interpreter_frame_initial_sp_offset *
1227                                        wordSize - (int)sizeof(BasicObjectLock)));
1228 
1229       const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
1230 
1231       // monitor expected in c_rarg1 for slow unlock path
1232       __ lea(regmon, monitor); // address of first monitor
1233 
1234       __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
1235       __ testptr(t, t);
1236       __ jcc(Assembler::notZero, unlock);
1237 
1238       // Entry already unlocked, need to throw exception
1239       __ MacroAssembler::call_VM(noreg,
1240                                  CAST_FROM_FN_PTR(address,
1241                    InterpreterRuntime::throw_illegal_monitor_state_exception));
1242       __ should_not_reach_here();
1243 
1244       __ bind(unlock);
1245       __ unlock_object(regmon);
1246       NOT_LP64(__ get_thread(thread);)
1247       __ dec_held_monitor_count(thread);
1248     }
1249     __ bind(L);
1250   }
1251 
1252   // jvmti support
1253   // Note: This must happen _after_ handling/throwing any exceptions since
1254   //       the exception handler code notifies the runtime of method exits
1255   //       too. If this happens before, method entry/exit notifications are
1256   //       not properly paired (was bug - gri 11/22/99).
1257   __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1258 
1259   // restore potential result in edx:eax, call result handler to
1260   // restore potential result in ST0 & handle result
1261 
1262   __ pop(ltos);
1263   LP64_ONLY( __ pop(dtos));
1264 
1265   __ movptr(t, Address(rbp,
1266                        (frame::interpreter_frame_result_handler_offset) * wordSize));
1267   __ call(t);

1296   __ empty_expression_stack();
1297   __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
1298   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
1299 
1300   // throw exception
1301   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
1302   // the call_VM checks for exception, so we should never return here.
1303   __ should_not_reach_here();
1304 
1305   return entry_point;
1306 }
1307 
1308 //
1309 // Generic interpreted method entry to (asm) interpreter
1310 //
1311 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1312   // determine code generation flags
1313   bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1314 
1315   // ebx: Method*
1316   // rbcp: sender sp (set in InterpreterMacroAssembler::prepare_to_jump_from_interpreted / generate_call_stub)
1317   address entry_point = __ pc();
1318 
1319   const Address constMethod(rbx, Method::const_offset());
1320   const Address access_flags(rbx, Method::access_flags_offset());
1321   const Address size_of_parameters(rdx,
1322                                    ConstMethod::size_of_parameters_offset());
1323   const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
1324 
1325 
1326   // get parameter size (always needed)
1327   __ movptr(rdx, constMethod);
1328   __ load_unsigned_short(rcx, size_of_parameters);
1329 
1330   // rbx: Method*
1331   // rcx: size of parameters
1332   // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )
1333 
1334   __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
1335   __ subl(rdx, rcx); // rdx = no. of additional locals
1336 
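To make the computation at the end of this hunk concrete, here is a worked example with made-up numbers (not taken from any real method): the ConstMethod stores both counts, and their difference is the number of extra local slots the interpreter still has to allocate beyond the parameters already passed on the caller's expression stack. A minimal stand-alone C++ sketch:

#include <cstdio>

int main() {
  unsigned short size_of_parameters = 2;   // e.g. a method int f(int a, int b)
  unsigned short size_of_locals     = 5;   // parameters plus three body-declared locals
  int additional_locals = size_of_locals - size_of_parameters;  // value left in rdx by the subl above
  std::printf("additional locals to allocate: %d\n", additional_locals);
  return 0;
}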