src/cpu/x86/vm/templateInterpreter_x86_64.cpp

 525     // get receiver (assume this is frequent case)
 526     __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
 527     __ jcc(Assembler::zero, done);
 528     __ movptr(rax, Address(rbx, Method::const_offset()));
 529     __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
 530     __ movptr(rax, Address(rax,
 531                            ConstantPool::pool_holder_offset_in_bytes()));
 532     __ movptr(rax, Address(rax, mirror_offset));
 533 
 534 #ifdef ASSERT
 535     {
 536       Label L;
 537       __ testptr(rax, rax);
 538       __ jcc(Assembler::notZero, L);
 539       __ stop("synchronization object is NULL");
 540       __ bind(L);
 541     }
 542 #endif // ASSERT
 543 
 544     __ bind(done);

 545   }
 546 
 547   // add space for monitor & lock
 548   __ subptr(rsp, entry_size); // add space for a monitor entry
 549   __ movptr(monitor_block_top, rsp);  // set new monitor block top
 550   // store object
 551   __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
 552   __ movptr(c_rarg1, rsp); // object address
 553   __ lock_object(c_rarg1);
 554 }
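
The monitor entry carved out of the stack just above has a fixed two-word layout: a BasicLock holding the displaced mark word, followed by the pointer to the locked object. A minimal self-contained sketch of that layout, using illustrative stand-in types rather than the HotSpot declarations:

    #include <cstdint>

    // Stand-ins for BasicLock / BasicObjectLock; 'entry_size' above is two
    // machine words, and obj_offset_in_bytes() is one word from the base.
    struct BasicLockModel {
      intptr_t displaced_header;   // saved mark word of the locked object
    };

    struct BasicObjectLockModel {
      BasicLockModel lock;         // lock_offset_in_bytes() == 0
      void*          obj;          // obj_offset_in_bytes() == sizeof(BasicLockModel)
    };

    static_assert(sizeof(BasicObjectLockModel) == 2 * sizeof(void*),
                  "one monitor entry occupies two stack words");
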
 555 
 556 // Generate a fixed interpreter frame. This is identical setup for
 557 // interpreted methods and for native methods hence the shared code.
 558 //
 559 // Args:
 560 //      rax: return address
 561 //      rbx: Method*
 562 //      r14: pointer to locals
 563 //      r13: sender sp
 564 //      rdx: cp cache


 663     __ shrl(rcx, 2 * BitsPerByte);
 664     __ andl(rcx, 0xFF);
 665     __ cmpl(rcx, Bytecodes::_getfield);
 666     __ jcc(Assembler::notEqual, slow_path);
 667 
 668     // Note: constant pool entry is not valid before bytecode is resolved
 669     __ movptr(rcx,
 670               Address(rdi,
 671                       rdx,
 672                       Address::times_8,
 673                       ConstantPoolCache::base_offset() +
 674                       ConstantPoolCacheEntry::f2_offset()));
 675     // edx: flags
 676     __ movl(rdx,
 677             Address(rdi,
 678                     rdx,
 679                     Address::times_8,
 680                     ConstantPoolCache::base_offset() +
 681                     ConstantPoolCacheEntry::flags_offset()));
 682 


 683     Label notObj, notInt, notByte, notBool, notShort;
 684     const Address field_address(rax, rcx, Address::times_1);
 685 
 686     // Need to differentiate between igetfield, agetfield, bgetfield etc.
 687     // because they are different sizes.
 688     // Use the type from the constant pool cache
 689     __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
 690     // Make sure we don't need to mask edx after the above shift
 691     ConstantPoolCacheEntry::verify_tos_state_shift();
 692 
 693     __ cmpl(rdx, atos);
 694     __ jcc(Assembler::notEqual, notObj);
 695     // atos
 696     __ load_heap_oop(rax, field_address);
 697     __ jmp(xreturn_path);
 698 
 699     __ bind(notObj);
 700     __ cmpl(rdx, itos);
 701     __ jcc(Assembler::notEqual, notInt);
 702     // itos
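
As a rough model of the bytecode check at the top of this hunk (the shrl by 2 * BitsPerByte, andl with 0xFF and cmpl against Bytecodes::_getfield): the constant pool cache entry packs the resolved bytecode for the first invocation into byte 2 of its indices word, and the fast path only trusts the cached f2/flags values once that byte reads as getfield. A self-contained sketch; the constants below are illustrative, with 0xb4 being the JVM opcode of getfield:

    #include <cstdint>

    static const int      kBitsPerByte    = 8;
    static const uint32_t kGetfieldOpcode = 0xb4;   // Bytecodes::_getfield

    // True once the cache entry's indices word records getfield as the
    // resolved bytecode, i.e. the entry is safe to use on the fast path.
    static inline bool cache_entry_resolved_as_getfield(uint32_t indices) {
      uint32_t bc = (indices >> (2 * kBitsPerByte)) & 0xFFu;
      return bc == kGetfieldOpcode;
    }
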


 773   //   G1 pre-barrier code is executed when the current method is
 774   //   Reference.get() then going through the normal method entry
 775   //   will be fine.
 776   // * The G1 code can, however, check the receiver object (the instance
 777   //   of java.lang.Reference) and jump to the slow path if null. If the
 778   //   Reference object is null then we obviously cannot fetch the referent
 779   //   and so we don't need to call the G1 pre-barrier. Thus we can use the
 780   //   regular method entry code to generate the NPE.
 781   //
 782   // This code is based on generate_accessor_entry.
 783   //
 784   // rbx: Method*
 785 
 786   // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
 787 
 788   address entry = __ pc();
 789 
 790   const int referent_offset = java_lang_ref_Reference::referent_offset;
 791   guarantee(referent_offset > 0, "referent offset not initialized");
 792 
 793   if (UseG1GC) {
 794     Label slow_path;
 795     // rbx: method
 796 
 797     // Check if local 0 != NULL
 798     // If the receiver is null then it is OK to jump to the slow path.
 799     __ movptr(rax, Address(rsp, wordSize));
 800 
 801     __ testptr(rax, rax);
 802     __ jcc(Assembler::zero, slow_path);
 803 


 804     // rax: local 0
 805     // rbx: method (but can be used as scratch now)
 806     // rdx: scratch
 807     // rdi: scratch
 808 
 809     // Generate the G1 pre-barrier code to log the value of
 810     // the referent field in an SATB buffer.
 811 
 812     // Load the value of the referent field.
 813     const Address field_address(rax, referent_offset);
 814     __ load_heap_oop(rax, field_address);
 815 
 816     // Generate the G1 pre-barrier code to log the value of
 817     // the referent field in an SATB buffer.
 818     __ g1_write_barrier_pre(noreg /* obj */,
 819                             rax /* pre_val */,
 820                             r15_thread /* thread */,
 821                             rbx /* tmp */,
 822                             true /* tosca_live */,
 823                             true /* expand_call */);
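
The g1_write_barrier_pre call above expands to an SATB (snapshot-at-the-beginning) logging sequence: while concurrent marking is active, the value just loaded from the referent field is recorded so the concurrent marker cannot lose it. A minimal self-contained sketch of the idea; the queue flipping and the runtime call on buffer overflow are omitted, and all names are illustrative rather than HotSpot's:

    #include <vector>

    struct SATBThreadModel {
      bool               satb_active = false;   // mirrors the per-thread active flag
      std::vector<void*> satb_buffer;           // thread-local log of previous values

      // What the pre-barrier amounts to for a non-null value seen during marking.
      void pre_barrier(void* pre_val) {
        if (satb_active && pre_val != nullptr) {
          satb_buffer.push_back(pre_val);       // enqueue for the marking threads
        }
      }
    };
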


 914     __ jcc(Assembler::notEqual, slow_path);
 915 
 916     // We don't generate local frame and don't align stack because
 917     // we call stub code and there is no safepoint on this path.
 918 
 919     // Load parameters
 920     const Register crc = c_rarg0;  // crc
 921     const Register buf = c_rarg1;  // source java byte array address
 922     const Register len = c_rarg2;  // length
 923     const Register off = len;      // offset (never overlaps with 'len')
 924 
 925     // Arguments are reversed on java expression stack
 926     // Calculate address of start element
 927     if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
 928       __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
 929       __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
 930       __ addq(buf, off); // + offset
 931       __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
 932     } else {
 933       __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array

 934       __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
 935       __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
 936       __ addq(buf, off); // + offset
 937       __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
 938     }
 939     // Can now load 'len' since we're finished with 'off'
 940     __ movl(len, Address(rsp, wordSize)); // Length
 941 
 942     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
 943     // result in rax
 944 
 945     // _areturn
 946     __ pop(rdi);                // get return address
 947     __ mov(rsp, r13);           // set sp to sender sp
 948     __ jmp(rdi);
 949 
 950     // generate a vanilla native entry as the slow path
 951     __ bind(slow_path);
 952 
 953     (void) generate_native_entry(false);
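
For reference, a bit-at-a-time model of what the updateBytesCRC32 stub computes: a standard CRC-32 with the reflected polynomial 0xEDB88320, as used by java.util.zip.CRC32, continued from the incoming crc over len bytes. The real stub handles the bit inversion internally in the same way but uses table lookups or the SSE4.2/CLMUL hardware paths; this sketch only pins down the expected semantics:

    #include <cstdint>
    #include <cstddef>

    static uint32_t update_bytes_crc32_model(uint32_t crc, const uint8_t* buf, size_t len) {
      crc = ~crc;                          // enter the internal (inverted) form
      for (size_t i = 0; i < len; i++) {
        crc ^= buf[i];
        for (int bit = 0; bit < 8; bit++) {
          crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
        }
      }
      return ~crc;                         // return to the public form
    }
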




 525     // get receiver (assume this is frequent case)
 526     __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
 527     __ jcc(Assembler::zero, done);
 528     __ movptr(rax, Address(rbx, Method::const_offset()));
 529     __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
 530     __ movptr(rax, Address(rax,
 531                            ConstantPool::pool_holder_offset_in_bytes()));
 532     __ movptr(rax, Address(rax, mirror_offset));
 533 
 534 #ifdef ASSERT
 535     {
 536       Label L;
 537       __ testptr(rax, rax);
 538       __ jcc(Assembler::notZero, L);
 539       __ stop("synchronization object is NULL");
 540       __ bind(L);
 541     }
 542 #endif // ASSERT
 543 
 544     __ bind(done);
 545     oopDesc::bs()->interpreter_write_barrier(_masm, rax);
 546   }
 547 
 548   // add space for monitor & lock
 549   __ subptr(rsp, entry_size); // add space for a monitor entry
 550   __ movptr(monitor_block_top, rsp);  // set new monitor block top
 551   // store object
 552   __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
 553   __ movptr(c_rarg1, rsp); // object address
 554   __ lock_object(c_rarg1);
 555 }
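
The interpreter_write_barrier call added at new line 545 resolves the mirror through the GC's write barrier before it is stored into the monitor and locked; for Shenandoah of this era that means following the Brooks forwarding pointer and, during evacuation, copying the object to to-space first so the locking code updates the current copy's header. A minimal self-contained model of that guarantee; collection-set checks and the CAS that resolves racing evacuations are omitted, and every name below is an illustrative assumption:

    #include <cstring>

    struct ObjModel {
      ObjModel* fwd;          // Brooks forwarding pointer, one word per object
      char      payload[24];
    };

    static bool evacuation_in_progress = false;    // stand-in for the VM's GC phase

    static ObjModel* write_barrier(ObjModel* obj, ObjModel* to_space) {
      ObjModel* cur = obj->fwd;                    // read-barrier part: follow fwd
      if (evacuation_in_progress && cur == obj) {  // still the from-space copy?
        std::memcpy(to_space->payload, cur->payload, sizeof(cur->payload));
        to_space->fwd = to_space;                  // new copy forwards to itself
        obj->fwd = to_space;                       // publish (real code uses a CAS)
        cur = to_space;
      }
      return cur;                                  // always a to-space reference
    }
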
 556 
 557 // Generate a fixed interpreter frame. This is identical setup for
 558 // interpreted methods and for native methods hence the shared code.
 559 //
 560 // Args:
 561 //      rax: return address
 562 //      rbx: Method*
 563 //      r14: pointer to locals
 564 //      r13: sender sp
 565 //      rdx: cp cache


 664     __ shrl(rcx, 2 * BitsPerByte);
 665     __ andl(rcx, 0xFF);
 666     __ cmpl(rcx, Bytecodes::_getfield);
 667     __ jcc(Assembler::notEqual, slow_path);
 668 
 669     // Note: constant pool entry is not valid before bytecode is resolved
 670     __ movptr(rcx,
 671               Address(rdi,
 672                       rdx,
 673                       Address::times_8,
 674                       ConstantPoolCache::base_offset() +
 675                       ConstantPoolCacheEntry::f2_offset()));
 676     // edx: flags
 677     __ movl(rdx,
 678             Address(rdi,
 679                     rdx,
 680                     Address::times_8,
 681                     ConstantPoolCache::base_offset() +
 682                     ConstantPoolCacheEntry::flags_offset()));
 683 
 684     oopDesc::bs()->interpreter_read_barrier_not_null(_masm, rax);
 685 
 686     Label notObj, notInt, notByte, notBool, notShort;
 687     const Address field_address(rax, rcx, Address::times_1);
 688 
 689     // Need to differentiate between igetfield, agetfield, bgetfield etc.
 690     // because they are different sizes.
 691     // Use the type from the constant pool cache
 692     __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
 693     // Make sure we don't need to mask edx after the above shift
 694     ConstantPoolCacheEntry::verify_tos_state_shift();
 695 
 696     __ cmpl(rdx, atos);
 697     __ jcc(Assembler::notEqual, notObj);
 698     // atos
 699     __ load_heap_oop(rax, field_address);
 700     __ jmp(xreturn_path);
 701 
 702     __ bind(notObj);
 703     __ cmpl(rdx, itos);
 704     __ jcc(Assembler::notEqual, notInt);
 705     // itos
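
The interpreter_read_barrier_not_null call added in this hunk (new line 684) resolves the receiver through the GC's read barrier before its field is loaded; for Shenandoah of this era that is a Brooks forwarding-pointer dereference, so the load always sees the most recent copy of the object. A minimal model with illustrative types (the forwarding word, which really sits one word before the object, is modelled as an ordinary member):

    struct BrooksObjModel {
      BrooksObjModel* fwd;     // forwarding pointer: self, or the to-space copy
      char            payload[16];
    };

    // The "not_null" variant can skip the null check because the caller has
    // already established that the reference is non-null.
    static inline BrooksObjModel* read_barrier_not_null(BrooksObjModel* obj) {
      return obj->fwd;
    }
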


 776   //   G1 pre-barrier code is executed when the current method is
 777   //   Reference.get() then going through the normal method entry
 778   //   will be fine.
 779   // * The G1 code can, however, check the receiver object (the instance
 780   //   of java.lang.Reference) and jump to the slow path if null. If the
 781   //   Reference object is null then we obviously cannot fetch the referent
 782   //   and so we don't need to call the G1 pre-barrier. Thus we can use the
 783   //   regular method entry code to generate the NPE.
 784   //
 785   // This code is based on generate_accessor_entry.
 786   //
 787   // rbx: Method*
 788 
 789   // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
 790 
 791   address entry = __ pc();
 792 
 793   const int referent_offset = java_lang_ref_Reference::referent_offset;
 794   guarantee(referent_offset > 0, "referent offset not initialized");
 795 
 796   if (UseG1GC || UseShenandoahGC) {
 797     Label slow_path;
 798     // rbx: method
 799 
 800     // Check if local 0 != NULL
 801     // If the receiver is null then it is OK to jump to the slow path.
 802     __ movptr(rax, Address(rsp, wordSize));
 803 
 804     __ testptr(rax, rax);
 805     __ jcc(Assembler::zero, slow_path);
 806 
 807     oopDesc::bs()->interpreter_read_barrier_not_null(_masm, rax);
 808 
 809     // rax: local 0
 810     // rbx: method (but can be used as scratch now)
 811     // rdx: scratch
 812     // rdi: scratch
 813 
 814     // Generate the G1 pre-barrier code to log the value of
 815     // the referent field in an SATB buffer.
 816 
 817     // Load the value of the referent field.
 818     const Address field_address(rax, referent_offset);
 819     __ load_heap_oop(rax, field_address);
 820 
 821     // Generate the G1 pre-barrier code to log the value of
 822     // the referent field in an SATB buffer.
 823     __ g1_write_barrier_pre(noreg /* obj */,
 824                             rax /* pre_val */,
 825                             r15_thread /* thread */,
 826                             rbx /* tmp */,
 827                             true /* tosca_live */,
 828                             true /* expand_call */);
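
The load_heap_oop of the referent above decodes a compressed oop when UseCompressedOops is enabled. A sketch of that decoding, with the heap base and shift passed as parameters rather than read from the VM globals (on x86_64 the base lives in r12 in generated code); names here are illustrative:

    #include <cstdint>

    // narrowOop 0 maps to null; otherwise base + (narrow << shift).
    static inline void* decode_heap_oop_model(uint32_t narrow,
                                              uintptr_t heap_base,
                                              unsigned  shift) {
      if (narrow == 0) return nullptr;
      return reinterpret_cast<void*>(heap_base + (static_cast<uintptr_t>(narrow) << shift));
    }
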


 919     __ jcc(Assembler::notEqual, slow_path);
 920 
 921     // We don't generate local frame and don't align stack because
 922     // we call stub code and there is no safepoint on this path.
 923 
 924     // Load parameters
 925     const Register crc = c_rarg0;  // crc
 926     const Register buf = c_rarg1;  // source java byte array address
 927     const Register len = c_rarg2;  // length
 928     const Register off = len;      // offset (never overlaps with 'len')
 929 
 930     // Arguments are reversed on java expression stack
 931     // Calculate address of start element
 932     if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
 933       __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
 934       __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
 935       __ addq(buf, off); // + offset
 936       __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
 937     } else {
 938       __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
 939       oopDesc::bs()->interpreter_read_barrier_not_null(_masm, buf);
 940       __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
 941       __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
 942       __ addq(buf, off); // + offset
 943       __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
 944     }
 945     // Can now load 'len' since we're finished with 'off'
 946     __ movl(len, Address(rsp, wordSize)); // Length
 947 
 948     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
 949     // result in rax
 950 
 951     // _areturn
 952     __ pop(rdi);                // get return address
 953     __ mov(rsp, r13);           // set sp to sender sp
 954     __ jmp(rdi);
 955 
 956     // generate a vanilla native entry as the slow path
 957     __ bind(slow_path);
 958 
 959     (void) generate_native_entry(false);
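
As a reading aid for the slot offsets used above, a sketch of the reversed expression-stack layout for the two intrinsified CRC32 methods. The long address in the ByteBuffer variant takes two slots, which is why its initial CRC sits at 5*wordSize. Slot numbers are machine words above rsp at this point and are illustrative, not HotSpot constants:

    // update(int crc, byte[] b, int off, int len)
    enum CrcUpdateBytesSlots {
      kBytesLen = 1,        // len
      kBytesOff = 2,        // off
      kBytesBuf = 3,        // b (array oop; gets the read barrier above)
      kBytesCrc = 4         // crc
    };

    // updateByteBuffer(int crc, long addr, int off, int len)
    enum CrcUpdateByteBufferSlots {
      kBufLen  = 1,         // len
      kBufOff  = 2,         // off
      kBufAddr = 3,         // addr (long: occupies slots 3 and 4)
      kBufCrc  = 5          // crc
    };
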

