
src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp


 442   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
 443     rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
 444   }
 445   if (RTMRetryCount > 0) {
 446     // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
 447     rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
 448   }
 449 
 450   movptr(tmpReg, Address(boxReg, owner_offset)) ;
 451   testptr(tmpReg, tmpReg) ;
 452   jccb(Assembler::notZero, L_decrement_retry) ;
 453 
 454   // Appears unlocked - try to swing _owner from null to non-null.
 455   // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
 456 #ifdef _LP64
 457   Register threadReg = r15_thread;
 458 #else
 459   get_thread(scrReg);
 460   Register threadReg = scrReg;
 461 #endif

 462   lock();
 463   cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
 464 
 465   if (RTMRetryCount > 0) {
 466     // if success, done; else retry
 467     jccb(Assembler::equal, DONE_LABEL) ;
 468     bind(L_decrement_retry);
 469     // Spin and retry if lock is busy.
 470     rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);

 471   }
 472   else {
 473     bind(L_decrement_retry);

 474   }
 475 }
 476 
 477 #endif //  INCLUDE_RTM_OPT
 478 
 479 // fast_lock and fast_unlock used by C2
 480 
 481 // Because the transitions from emitted code to the runtime
 482 // monitorenter/exit helper stubs are so slow it's critical that
 483 // we inline both the stack-locking fast path and the inflated fast path.
 484 //
 485 // See also: cmpFastLock and cmpFastUnlock.
 486 //
 487 // What follows is a specialized inline transliteration of the code
 488 // in enter() and exit(). If we're concerned about I$ bloat another
 489 // option would be to emit TrySlowEnter and TrySlowExit methods
 490 // at startup-time.  These methods would accept arguments as
 491 // (rax=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
 492 // indications in the icc.ZFlag.  fast_lock and fast_unlock would simply
 493 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
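// A minimal C-style model of the contract every path below implements,
// assuming plain fields and ignoring memory ordering (hedged sketch; cas()
// is a hypothetical compare-and-swap helper, not a HotSpot API):
//
//   bool fast_lock_model(ObjectMonitor* m, Thread* self) {
//     // true  -> ZF == 1 -> lock acquired on the fast path
//     // false -> ZF == 0 -> C2 branches into the monitorenter slow path
//     if (cas(&m->_owner, (Thread*)nullptr, self)) return true;   // uncontended
//     if (m->_owner == self) { m->_recursions++; return true; }   // recursive
//     return false;                                               // contended
//   }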

 568     assert(cx1Reg == noreg, "");
 569     assert(cx2Reg == noreg, "");
 570     assert_different_registers(objReg, boxReg, tmpReg, scrReg);
 571   }
 572 
 573   // Possible cases that we'll encounter in fast_lock
 574   // ------------------------------------------------
 575   // * Inflated
 576   //    -- unlocked
 577   //    -- Locked
 578   //       = by self
 579   //       = by other
 580   // * neutral
 581   // * stack-locked
 582   //    -- by self
 583   //       = sp-proximity test hits
 584   //       = sp-proximity test generates false-negative
 585   //    -- by other
 586   //
 587 
 588   Label IsInflated, DONE_LABEL, NO_COUNT, COUNT;
 589 
 590   if (DiagnoseSyncOnValueBasedClasses != 0) {
 591     load_klass(tmpReg, objReg, scrReg);
 592     movl(tmpReg, Address(tmpReg, Klass::access_flags_offset()));
 593     testl(tmpReg, JVM_ACC_IS_VALUE_BASED_CLASS);
 594     jcc(Assembler::notZero, DONE_LABEL);
 595   }
 596 
 597 #if INCLUDE_RTM_OPT
 598   if (UseRTMForStackLocks && use_rtm) {
 599     assert(LockingMode != LM_MONITOR, "LockingMode == 0 (LM_MONITOR) and +UseRTMForStackLocks are mutually exclusive");
 600     rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
 601                       stack_rtm_counters, method_data, profile_rtm,
 602                       DONE_LABEL, IsInflated);
 603   }
 604 #endif // INCLUDE_RTM_OPT
 605 
 606   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));          // [FETCH]
 607   testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral
 608   jcc(Assembler::notZero, IsInflated);

 610   if (LockingMode == LM_MONITOR) {
 611     // Clear ZF so that we take the slow path at the DONE label. objReg is known to be non-zero.
 612     testptr(objReg, objReg);
 613   } else {
 614     assert(LockingMode == LM_LEGACY, "must be");
 615     // Attempt stack-locking ...
 616     orptr (tmpReg, markWord::unlocked_value);
 617     movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
 618     lock();
 619     cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes()));      // Updates tmpReg
 620     jcc(Assembler::equal, COUNT);           // Success
 621 
 622     // Recursive locking.
 623     // The object is stack-locked: markword contains stack pointer to BasicLock.
 624     // Locked by current thread if difference with current SP is less than one page.
 625     subptr(tmpReg, rsp);
 626     // The next instruction sets ZFlag == 1 (Success) if the difference is less than one page.
 627     andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - (int)os::vm_page_size())) );
 628     movptr(Address(boxReg, 0), tmpReg);
 629   }
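  // A hedged sketch of the sp-proximity test above, written as C-style
  // pseudocode (illustrative only; 'mark' is the fetched markword, which for
  // a stack-locked object is the address of the owning frame's BasicLock):
  //
  //   intptr_t delta = mark - rsp;                  // subptr(tmpReg, rsp)
  //   delta &= 7 - (intptr_t)os::vm_page_size();    // keep tag bits and out-of-page bits
  //   box->_displaced_header = delta;               // 0 denotes a recursive entry
  //   // delta == 0 -> ZF == 1 -> BasicLock within one page of SP: recursive, success
  //   // delta != 0 -> ZF == 0 -> locked by another thread (or a distant frame): slow path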

 630   jmp(DONE_LABEL);
 631 
 632   bind(IsInflated);
 633   // The object is inflated. tmpReg contains the ObjectMonitor* tagged with markWord::monitor_value.
 634 
 635 #if INCLUDE_RTM_OPT
 636   // Use the same RTM locking code in 32- and 64-bit VM.
 637   if (use_rtm) {
 638     rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
 639                          rtm_counters, method_data, profile_rtm, DONE_LABEL);
 640   } else {
 641 #endif // INCLUDE_RTM_OPT
 642 
 643 #ifndef _LP64
 644   // The object is inflated.
 645 
 646   // boxReg refers to the on-stack BasicLock in the current frame.
 647   // We'd like to write:
 648   //   set box->_displaced_header = markWord::unused_mark().  Any non-0 value suffices.
 649   // This is convenient but results in an ST-before-CAS penalty.  The following CAS suffers
 650   // additional latency because we have another ST in the store buffer that must drain.
 651 
 652   // avoid ST-before-CAS
 653   // register juggle because we need tmpReg for cmpxchgptr below
 654   movptr(scrReg, boxReg);
 655   movptr(boxReg, tmpReg);                   // consider: LEA box, [tmp-2]
 656 
 657   // Optimistic form: consider XORL tmpReg,tmpReg
 658   movptr(tmpReg, NULL_WORD);
 659 
 660   // Appears unlocked - try to swing _owner from null to non-null.
 661   // Ideally, I'd manifest "Self" with get_thread and then attempt
 662   // to CAS the register containing Self into m->Owner.
 663   // But we don't have enough registers, so instead we can either try to CAS
 664   // rsp or the address of the box (in scr) into &m->owner.  If the CAS succeeds
 665   // we later store "Self" into m->Owner.  Transiently storing a stack address
 666   // (rsp or the address of the box) into  m->owner is harmless.
 667   // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
 668   lock();
 669   cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 670   movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
 671   // If we weren't able to swing _owner from null to the BasicLock
 672   // then take the slow path.
 673   jccb  (Assembler::notZero, NO_COUNT);
 674   // update _owner from BasicLock to thread
 675   get_thread (scrReg);                    // beware: clobbers ICCs

 676   movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
 677   xorptr(boxReg, boxReg);                 // set icc.ZFlag = 1 to indicate success

 678 
 679   // If the CAS fails we can either retry or pass control to the slow path.
 680   // We use the latter tactic.
 681   // Pass the CAS result in the icc.ZFlag into DONE_LABEL
 682   // If the CAS was successful ...
 683   //   Self has acquired the lock
 684   //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
 685   // Intentional fall-through into DONE_LABEL ...
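  // The 32-bit trick above, as a hedged C-style model (plain fields,
  // illustrative only; cas() is a hypothetical compare-and-swap helper):
  //
  //   // Registers are too scarce to CAS "Self" directly, so CAS the box's
  //   // stack address into _owner first; a transient stack address there is
  //   // harmless because observers only test _owner for null vs non-null.
  //   if (cas(&m->_owner, (void*)nullptr, (void*)box)) {
  //     m->_owner = self;    // upgrade the BasicLock address to the real owner
  //     // ZF == 1 -> success
  //   } else {
  //     // ZF == 0 -> contended: fall into the slow path
  //   }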
 686 #else // _LP64
 687   // It's inflated and we use scrReg for ObjectMonitor* in this section.
 688   movq(scrReg, tmpReg);
 689   xorq(tmpReg, tmpReg);

 690   lock();
 691   cmpxchgptr(thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 692   // Unconditionally set box->_displaced_header = markWord::unused_mark().
 693   // Without the cast to int32_t, this style of movptr would destroy r10, which typically holds obj.
 694   movptr(Address(boxReg, 0), checked_cast<int32_t>(markWord::unused_mark().value()));
 695   // Propagate ICC.ZF from CAS above into DONE_LABEL.
 696   jccb(Assembler::equal, COUNT);          // CAS above succeeded; propagate ZF = 1 (success)
 697 
 698   cmpptr(thread, rax);                // Check if we are already the owner (recursive lock)
 699   jccb(Assembler::notEqual, NO_COUNT);    // If not recursive, ZF = 0 at this point (fail)
 700   incq(Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
 701   xorq(rax, rax); // Set ZF = 1 (success) for recursive lock, denoting locking success
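  // Equivalently, as a hedged C-style model of the 64-bit acquire above
  // (plain fields; cmpxchg() stands in for the hardware compare-and-swap):
  //
  //   void* prev = cmpxchg(&m->_owner, (void*)nullptr, (void*)r15_thread);
  //   if (prev == nullptr)         { /* ZF == 1: acquired                */ }
  //   else if (prev == r15_thread) { m->_recursions++; /* ZF forced to 1 */ }
  //   else                         { /* ZF == 0: contended, slow path    */ }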

 702 #endif // _LP64
 703 #if INCLUDE_RTM_OPT
 704   } // use_rtm()
 705 #endif
 706   bind(DONE_LABEL);
 707 
 708   // ZFlag == 1 count in fast path
 709   // ZFlag == 0 count in slow path
 710   jccb(Assembler::notZero, NO_COUNT); // jump if ZFlag == 0
 711 
 712   bind(COUNT);
 713   // Count monitors in fast path
 714   increment(Address(thread, JavaThread::held_monitor_count_offset()));
 715 
 716   xorl(tmpReg, tmpReg); // Set ZF == 1
 717 
 718   bind(NO_COUNT);
 719 
 720   // At NO_COUNT the icc ZFlag is set as follows ...
 721   // fast_unlock uses the same protocol.
 722   // ZFlag == 1 -> Success
 723   // ZFlag == 0 -> Failure - force control through the slow path
 724 }
 725 
 726 // obj: object to unlock
 727 // box: box address (displaced header location), killed.  Must be EAX.
 728 // tmp: killed, cannot be obj nor box.
 729 //
 730 // Some commentary on balanced locking:
 731 //
 732 // fast_lock and fast_unlock are emitted only for provably balanced lock sites.
 733 // Methods that don't have provably balanced locking are forced to run in the
 734 // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
 735 // The interpreter provides two properties:
 736 // I1:  At return-time the interpreter automatically and quietly unlocks any
 737 //      objects acquired in the current activation (frame).  Recall that the
 738 //      interpreter maintains an on-stack list of locks currently held by
 739 //      a frame.
 740 // I2:  If a method attempts to unlock an object that is not held by
 741 //      the frame, the interpreter throws IMSX.
 742 //
 743 // Let's say A(), which has provably balanced locking, acquires O and then calls B().
 744 // B() doesn't have provably balanced locking so it runs in the interpreter.
 745 // Control returns to A() and A() unlocks O.  By I1 and I2, above, we know that O
 746 // is still locked by A().
 747 //
 748 // The only other source of unbalanced locking would be JNI.  The "Java Native Interface:
 749 // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
 750 // should not be unlocked by "normal" java-level locking and vice-versa.  The specification
 751 // doesn't say what will occur if a program engages in such mixed-mode locking, however.
 752 // Arguably, given that the spec leaves the JNI case undefined, our implementation
 753 // could reasonably *avoid* checking the owner in fast_unlock().
 754 // In the interest of performance we elide the m->Owner==Self check in unlock.
 755 // A perfectly viable alternative is to elide the owner check except when
 756 // Xcheck:jni is enabled.
 757 
 758 void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
 759   assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
 760   assert(boxReg == rax, "");
 761   assert_different_registers(objReg, boxReg, tmpReg);
 762 
 763   Label DONE_LABEL, Stacked, COUNT, NO_COUNT;
 764 
 765 #if INCLUDE_RTM_OPT
 766   if (UseRTMForStackLocks && use_rtm) {
 767     assert(LockingMode != LM_MONITOR, "LockingMode == 0 (LM_MONITOR) and +UseRTMForStackLocks are mutually exclusive");
 768     Label L_regular_unlock;
 769     movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
 770     andptr(tmpReg, markWord::lock_mask_in_place);                     // look at 2 lock bits
 771     cmpptr(tmpReg, markWord::unlocked_value);                         // bits = 01 unlocked
 772     jccb(Assembler::notEqual, L_regular_unlock);                      // if !HLE RegularLock
 773     xend();                                                           // otherwise end...
 774     jmp(DONE_LABEL);                                                  // ... and we're done
 775     bind(L_regular_unlock);
 776   }
 777 #endif
 778 
 779   if (LockingMode == LM_LEGACY) {
 780     cmpptr(Address(boxReg, 0), NULL_WORD);                            // Examine the displaced header
 781     jcc   (Assembler::zero, COUNT);                                   // 0 indicates recursive stack-lock
 782   }
 783   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));   // Examine the object's markword
 784   if (LockingMode != LM_MONITOR) {
 785     testptr(tmpReg, markWord::monitor_value);                         // Inflated?
 786     jcc(Assembler::zero, Stacked);
 787   }
 788 
 789   // It's inflated.
 790 
 791 #if INCLUDE_RTM_OPT
 792   if (use_rtm) {
 793     Label L_regular_inflated_unlock;
 794     int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
 795     movptr(boxReg, Address(tmpReg, owner_offset));
 796     testptr(boxReg, boxReg);
 797     jccb(Assembler::notZero, L_regular_inflated_unlock);
 798     xend();
 799     jmp(DONE_LABEL);
 800     bind(L_regular_inflated_unlock);
 801   }
 802 #endif
 803 
 804   // Despite our balanced locking property we still check that m->_owner == Self
 805   // as Java routines or native JNI code called by this thread might
 806   // have released the lock.
 807   // Refer to the comments in synchronizer.cpp for how we might encode extra
 808   // state in _succ so we can avoid fetching EntryList|cxq.
 809   //

 824   // the number of loads below (currently 4) to just 2 or 3.
 825   // Refer to the comments in synchronizer.cpp.
 826   // In practice the chain of fetches doesn't seem to impact performance, however.
 827   xorptr(boxReg, boxReg);
 828   orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
 829   jccb  (Assembler::notZero, DONE_LABEL);
 830   movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
 831   orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
 832   jccb  (Assembler::notZero, DONE_LABEL);
 833   movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
 834   jmpb  (DONE_LABEL);
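  // A hedged C-style model of this 32-bit inflated exit (plain fields;
  // ZF == 0 routes control into the slow path):
  //
  //   if (m->_recursions != 0)          { /* ZF == 0: slow path             */ }
  //   else if (m->_EntryList | m->_cxq) { /* ZF == 0: waiters need waking   */ }
  //   else { m->_owner = nullptr;         /* ZF == 1: uncontended fast exit */ }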
 835 #else // _LP64
 836   // It's inflated
 837   Label CheckSucc, LNotRecursive, LSuccess, LGoSlowPath;
 838 
 839   cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 0);
 840   jccb(Assembler::equal, LNotRecursive);
 841 
 842   // Recursive inflated unlock
 843   decq(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
 844   jmpb(LSuccess);

 845 
 846   bind(LNotRecursive);

 847   movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
 848   orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
 849   jccb  (Assembler::notZero, CheckSucc);
 850   // Without the cast to int32_t, this style of movptr would destroy r10, which typically holds obj.
 851   movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
 852   jmpb  (DONE_LABEL);
 853 
 854   // Try to avoid passing control into the slow_path ...
 855   bind  (CheckSucc);
 856 
 857   // The following optional optimization can be elided if necessary
 858   // Effectively: if (succ == null) goto slow path
 859   // The code reduces the window for a race, however,
 860   // and thus benefits performance.
 861   cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
 862   jccb  (Assembler::zero, LGoSlowPath);
 863 
 864   xorptr(boxReg, boxReg);
 865   // Without the cast to int32_t, this style of movptr would destroy r10, which typically holds obj.
 866   movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);

 874   // (mov box,0; xchgq box, &m->Owner; LD _succ) .
 875   lock(); addl(Address(rsp, 0), 0);
 876 
 877   cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
 878   jccb  (Assembler::notZero, LSuccess);
 879 
 880   // Rare inopportune interleaving - race.
 881   // The successor vanished in the small window above.
 882   // The lock is contended -- (cxq|EntryList) != null -- and there's no apparent successor.
 883   // We need to ensure progress and succession.
 884   // Try to reacquire the lock.
 885   // If that fails then the new owner is responsible for succession and this
 886   // thread needs to take no further action and can exit via the fast path (success).
 887   // If the re-acquire succeeds then pass control into the slow path.
 888   // As implemented, this latter mode is horrible because we generate more
 889   // coherence traffic on the lock *and* artificially extend the critical section
 890   // length by virtue of passing control into the slow path.
 891 
 892   // box is really RAX -- the following CMPXCHG depends on that binding
 893   // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)

 894   lock();
 895   cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 896   // There's no successor so we tried to regrab the lock.
 897   // If that didn't work, then another thread grabbed the
 898   // lock so we're done (and exit was a success).
 899   jccb  (Assembler::notEqual, LSuccess);
 900   // Intentional fall-through into slow path
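  // The succession race above, as a hedged C-style model (plain fields;
  // full_fence() models the lock-prefixed addl):
  //
  //   m->_owner = nullptr;                    // release the lock
  //   full_fence();                           // lock(); addl(Address(rsp, 0), 0);
  //   if (m->_succ != nullptr) return true;   // a successor exists: fast exit
  //   // Contended but no apparent successor: try to regrab the lock.
  //   if (cmpxchg(&m->_owner, (void*)nullptr, (void*)self) != nullptr)
  //     return true;                          // another thread owns it: exit succeeded
  //   return false;                           // we own it again: slow path picks a successor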
 901 
 902   bind  (LGoSlowPath);
 903   orl   (boxReg, 1);                      // set ICC.ZF=0 to indicate failure
 904   jmpb  (DONE_LABEL);
 905 
 906   bind  (LSuccess);
 907   testl (boxReg, 0);                      // set ICC.ZF=1 to indicate success
 908   jmpb  (DONE_LABEL);
 909 
 910 #endif
 911   if (LockingMode == LM_LEGACY) {
 912     bind  (Stacked);
 913     movptr(tmpReg, Address (boxReg, 0));      // re-fetch
 914     lock();
 915     cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
 916     // Intentional fall-thru into DONE_LABEL
 917   }
 918 
 919   bind(DONE_LABEL);
 920 
 921   // ZFlag == 1 count in fast path
 922   // ZFlag == 0 count in slow path
 923   jccb(Assembler::notZero, NO_COUNT);
 924 
 925   bind(COUNT);
 926   // Count monitors in fast path
 927 #ifndef _LP64
 928   get_thread(tmpReg);
 929   decrementl(Address(tmpReg, JavaThread::held_monitor_count_offset()));
 930 #else // _LP64
 931   decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 932 #endif
 933 
 934   xorl(tmpReg, tmpReg); // Set ZF == 1
 935 
 936   bind(NO_COUNT);
 937 }
 938 
 939 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register rax_reg,
 940                                               Register t, Register thread) {
 941   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 942   assert(rax_reg == rax, "Used for CAS");
 943   assert_different_registers(obj, box, rax_reg, t, thread);
 944 
 945   // Handle inflated monitor.
 946   Label inflated;
 947   // Finish fast lock successfully. ZF value is irrelevant.
 948   Label locked;
 949   // Finish fast lock unsuccessfully. MUST jump with ZF == 0
 950   Label slow_path;
 951 
 952   if (DiagnoseSyncOnValueBasedClasses != 0) {
 953     load_klass(rax_reg, obj, t);
 954     movl(rax_reg, Address(rax_reg, Klass::access_flags_offset()));
 955     testl(rax_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
 956     jcc(Assembler::notZero, slow_path);
 957   }
 958 
 959   const Register mark = t;
 960 
 961   { // Lightweight Lock
 962 
 963     Label push;
 964 
 965     const Register top = box;
 966 
 967     // Load the mark.

 976 
 977     // Check if lock-stack is full.
 978     cmpl(top, LockStack::end_offset() - 1);
 979     jcc(Assembler::greater, slow_path);
 980 
 981     // Check if recursive.
 982     cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
 983     jccb(Assembler::equal, push);
 984 
 985     // Try to lock. Transition lock bits 0b01 => 0b00
 986     movptr(rax_reg, mark);
 987     orptr(rax_reg, markWord::unlocked_value);
 988     andptr(mark, ~(int32_t)markWord::unlocked_value);
 989     lock(); cmpxchgptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
 990     jcc(Assembler::notEqual, slow_path);
 991 
 992     bind(push);
 993     // After successful lock, push object on lock-stack.
 994     movptr(Address(thread, top), obj);
 995     addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);

 996     jmpb(locked);
 997   }
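  // A hedged C-style model of the fast path above ('top' is a byte offset
  // into the per-thread lock-stack; illustrative only):
  //
  //   if (top > LockStack::end_offset() - 1) goto slow_path;          // lock-stack full
  //   if (*(oop*)((char*)thread + top - oopSize) == obj) goto push;   // recursive
  //   // Transition the mark's lock bits 0b01 (unlocked) -> 0b00 (locked).
  //   if (!cas(&obj->mark, mark | unlocked_value, mark & ~unlocked_value))
  //     goto slow_path;
  //  push:
  //   *(oop*)((char*)thread + top) = obj;     // push obj on the lock-stack
  //   thread->lock_stack_top += oopSize;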
 998 
 999   { // Handle inflated monitor.
1000     bind(inflated);
1001 
1002     const Register tagged_monitor = mark;
1003 
1004     // CAS owner (null => current thread).
1005     xorptr(rax_reg, rax_reg);
1006     lock(); cmpxchgptr(thread, Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));

1007     jccb(Assembler::equal, locked);
1008 
1009     // Check if recursive.
1010     cmpptr(thread, rax_reg);
1011     jccb(Assembler::notEqual, slow_path);
1012 
1013     // Recursive.
1014     increment(Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));

1015   }
1016 
1017   bind(locked);
1018   increment(Address(thread, JavaThread::held_monitor_count_offset()));
1019   // Set ZF = 1
1020   xorl(rax_reg, rax_reg);
1021 
1022 #ifdef ASSERT
1023   // Check that locked label is reached with ZF set.
1024   Label zf_correct;
1025   Label zf_bad_zero;
1026   jcc(Assembler::zero, zf_correct);
1027   jmp(zf_bad_zero);
1028 #endif
1029 
1030   bind(slow_path);
1031 #ifdef ASSERT
1032   // Check that slow_path label is reached with ZF not set.
1033   jcc(Assembler::notZero, zf_correct);
1034   stop("Fast Lock ZF != 0");
1035   bind(zf_bad_zero);
1036   stop("Fast Lock ZF != 1");
1037   bind(zf_correct);
1038 #endif
1039   // C2 uses the value of ZF to determine the continuation.
1040 }
1041 
1042 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, Register t, Register thread) {
1043   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1044   assert(reg_rax == rax, "Used for CAS");
1045   assert_different_registers(obj, reg_rax, t);
1046 
1047   // Handle inflated monitor.
1048   Label inflated, inflated_check_lock_stack;
1049   // Finish fast unlock successfully.  MUST jump with ZF == 1
1050   Label unlocked;
1051 
1052   // Assume success.
1053   decrement(Address(thread, JavaThread::held_monitor_count_offset()));
1054 
1055   const Register mark = t;
1056   const Register top = reg_rax;
1057 
1058   Label dummy;
1059   C2FastUnlockLightweightStub* stub = nullptr;
1060 
1061   if (!Compile::current()->output()->in_scratch_emit_size()) {
1062     stub = new (Compile::current()->comp_arena()) C2FastUnlockLightweightStub(obj, mark, reg_rax, thread);
1063     Compile::current()->output()->add_stub(stub);
1064   }
1065 
1066   Label& push_and_slow_path = stub == nullptr ? dummy : stub->push_and_slow_path();
1067   Label& check_successor = stub == nullptr ? dummy : stub->check_successor();
1068 
1069   { // Lightweight Unlock
1070 
1071     // Load top.
1072     movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
1073 
1074     // Prefetch mark.
1075     movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
1076 
1077     // Check if obj is top of lock-stack.
1078     cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
1079     // Top of lock stack was not obj. Must be monitor.
1080     jcc(Assembler::notEqual, inflated_check_lock_stack);
1081 
1082     // Pop lock-stack.

1135     movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
1136 #else // _LP64
1137     Label recursive;
1138 
1139     // Check if recursive.
1140     cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 0);
1141     jccb(Assembler::notEqual, recursive);
1142 
1143     // Check if the entry lists are empty.
1144     movptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
1145     orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
1146     jcc(Assembler::notZero, check_successor);
1147 
1148     // Release lock.
1149     movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
1150     jmpb(unlocked);
1151 
1152     // Recursive unlock.
1153     bind(recursive);
1154     decrement(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
1155     xorl(t, t);
1156 #endif
1157   }
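  // A hedged C-style model of the inflated unlock above (plain fields):
  //
  //   if (m->_recursions != 0) { m->_recursions--; /* ZF == 1           */ }
  //   else if (m->_cxq | m->_EntryList) { /* waiters: check_successor   */ }
  //   else { m->_owner = nullptr;          /* ZF == 1: fast exit        */ }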
1158 
1159   bind(unlocked);
1160   if (stub != nullptr) {
1161     bind(stub->unlocked_continuation());
1162   }
1163 
1164 #ifdef ASSERT
1165   // Check that unlocked label is reached with ZF set.
1166   Label zf_correct;
1167   jcc(Assembler::zero, zf_correct);
1168   stop("Fast Unlock ZF != 1");
1169 #endif
1170 
1171   if (stub != nullptr) {
1172     bind(stub->slow_path_continuation());
1173   }
1174 #ifdef ASSERT
1175   // Check that stub->continuation() label is reached with ZF not set.

 442   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
 443     rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
 444   }
 445   if (RTMRetryCount > 0) {
 446     // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
 447     rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
 448   }
 449 
 450   movptr(tmpReg, Address(boxReg, owner_offset)) ;
 451   testptr(tmpReg, tmpReg) ;
 452   jccb(Assembler::notZero, L_decrement_retry) ;
 453 
 454   // Appears unlocked - try to swing _owner from null to non-null.
 455   // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
 456 #ifdef _LP64
 457   Register threadReg = r15_thread;
 458 #else
 459   get_thread(scrReg);
 460   Register threadReg = scrReg;
 461 #endif
 462   movptr(scrReg, Address(threadReg, JavaThread::lock_id_offset()));
 463   lock();
 464   cmpxchgptr(scrReg, Address(boxReg, owner_offset)); // Updates tmpReg
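  // i.e., a hedged model of the swing above: with this change _owner holds
  // the thread's lock id rather than the JavaThread* itself ('_lock_id' is
  // assumed from JavaThread::lock_id_offset()):
  //
  //   observed = cmpxchg(&m->_owner, 0, thread->_lock_id);  // ZF == 1 iff observed == 0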
 465 
 466   if (RTMRetryCount > 0) {
 467     // if success, done; else retry
 468     jccb(Assembler::equal, DONE_LABEL) ;
 469     bind(L_decrement_retry);
 470     // Spin and retry if lock is busy.
 471     rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
 472     jmp(DONE_LABEL);
 473   }
 474   else {
 475     bind(L_decrement_retry);
 476     jmp(DONE_LABEL);
 477   }
 478 }
 479 
 480 #endif //  INCLUDE_RTM_OPT
 481 
 482 // fast_lock and fast_unlock used by C2
 483 
 484 // Because the transitions from emitted code to the runtime
 485 // monitorenter/exit helper stubs are so slow it's critical that
 486 // we inline both the stack-locking fast path and the inflated fast path.
 487 //
 488 // See also: cmpFastLock and cmpFastUnlock.
 489 //
 490 // What follows is a specialized inline transliteration of the code
 491 // in enter() and exit(). If we're concerned about I$ bloat another
 492 // option would be to emit TrySlowEnter and TrySlowExit methods
 493 // at startup-time.  These methods would accept arguments as
 494 // (rax=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
 495 // indications in the icc.ZFlag.  fast_lock and fast_unlock would simply
 496 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.

 571     assert(cx1Reg == noreg, "");
 572     assert(cx2Reg == noreg, "");
 573     assert_different_registers(objReg, boxReg, tmpReg, scrReg);
 574   }
 575 
 576   // Possible cases that we'll encounter in fast_lock
 577   // ------------------------------------------------
 578   // * Inflated
 579   //    -- unlocked
 580   //    -- Locked
 581   //       = by self
 582   //       = by other
 583   // * neutral
 584   // * stack-locked
 585   //    -- by self
 586   //       = sp-proximity test hits
 587   //       = sp-proximity test generates false-negative
 588   //    -- by other
 589   //
 590 
 591   Label IsInflated, DONE_LABEL, COUNT;
 592 
 593   if (DiagnoseSyncOnValueBasedClasses != 0) {
 594     load_klass(tmpReg, objReg, scrReg);
 595     movl(tmpReg, Address(tmpReg, Klass::access_flags_offset()));
 596     testl(tmpReg, JVM_ACC_IS_VALUE_BASED_CLASS);
 597     jcc(Assembler::notZero, DONE_LABEL);
 598   }
 599 
 600 #if INCLUDE_RTM_OPT
 601   if (UseRTMForStackLocks && use_rtm) {
 602     assert(LockingMode != LM_MONITOR, "LockingMode == 0 (LM_MONITOR) and +UseRTMForStackLocks are mutually exclusive");
 603     rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
 604                       stack_rtm_counters, method_data, profile_rtm,
 605                       DONE_LABEL, IsInflated);
 606   }
 607 #endif // INCLUDE_RTM_OPT
 608 
 609   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));          // [FETCH]
 610   testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral
 611   jcc(Assembler::notZero, IsInflated);

 613   if (LockingMode == LM_MONITOR) {
 614     // Clear ZF so that we take the slow path at the DONE label. objReg is known to be non-zero.
 615     testptr(objReg, objReg);
 616   } else {
 617     assert(LockingMode == LM_LEGACY, "must be");
 618     // Attempt stack-locking ...
 619     orptr (tmpReg, markWord::unlocked_value);
 620     movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
 621     lock();
 622     cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes()));      // Updates tmpReg
 623     jcc(Assembler::equal, COUNT);           // Success
 624 
 625     // Recursive locking.
 626     // The object is stack-locked: markword contains stack pointer to BasicLock.
 627     // Locked by current thread if difference with current SP is less than one page.
 628     subptr(tmpReg, rsp);
 629     // The next instruction sets ZFlag == 1 (Success) if the difference is less than one page.
 630     andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - (int)os::vm_page_size())) );
 631     movptr(Address(boxReg, 0), tmpReg);
 632   }
 633   // Fall through after the stack-locking attempt (including the recursive case).
 634   jmp(DONE_LABEL);
 635 
 636   bind(IsInflated);
 637   // The object is inflated. tmpReg contains the ObjectMonitor* tagged with markWord::monitor_value.
 638 
 639 #if INCLUDE_RTM_OPT
 640   // Use the same RTM locking code in 32- and 64-bit VM.
 641   if (use_rtm) {
 642     rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
 643                          rtm_counters, method_data, profile_rtm, DONE_LABEL);
 644   } else {
 645 #endif // INCLUDE_RTM_OPT
 646 
 647 #ifndef _LP64
 648   // The object is inflated.
 649 
 650   // boxReg refers to the on-stack BasicLock in the current frame.
 651   // We'd like to write:
 652   //   set box->_displaced_header = markWord::unused_mark().  Any non-0 value suffices.
 653   // This is convenient but results in an ST-before-CAS penalty.  The following CAS suffers
 654   // additional latency because we have another ST in the store buffer that must drain.
 655 
 656   // avoid ST-before-CAS
 657   // register juggle because we need tmpReg for cmpxchgptr below
 658   movptr(scrReg, boxReg);
 659   movptr(boxReg, tmpReg);                   // consider: LEA box, [tmp-2]
 660 
 661   // Optimistic form: consider XORL tmpReg,tmpReg
 662   movptr(tmpReg, NULL_WORD);
 663 
 664   // Appears unlocked - try to swing _owner from null to non-null.
 665   // Ideally, I'd manifest "Self" with get_thread and then attempt
 666   // to CAS the register containing thread id into m->Owner.
 667   // But we don't have enough registers, so instead we can either try to CAS
 668   // rsp or the address of the box (in scr) into &m->owner.  If the CAS succeeds
 669   // we later store thread id into m->Owner.  Transiently storing a stack address
 670   // (rsp or the address of the box) into  m->owner is harmless.
 671   // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
 672   lock();
 673   cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 674   movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
 675   // If we weren't able to swing _owner from null to the BasicLock
 676   // then take the slow path.
 677   jccb  (Assembler::notZero, DONE_LABEL);
 678   // update _owner from BasicLock to thread
 679   get_thread (scrReg);                    // beware: clobbers ICCs
 680   movptr(scrReg, Address(scrReg, JavaThread::lock_id_offset()));
 681   movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
 682   xorptr(boxReg, boxReg);                 // set icc.ZFlag = 1 to indicate success
 683   jmp(DONE_LABEL);
 684 
 685   // If the CAS fails we can either retry or pass control to the slow path.
 686   // We use the latter tactic.
 687   // Pass the CAS result in the icc.ZFlag into DONE_LABEL
 688   // If the CAS was successful ...
 689   //   Self has acquired the lock
 690   //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
 691   // Intentional fall-through into DONE_LABEL ...
 692 #else // _LP64
 693   // Unconditionally set box->_displaced_header = markWord::unused_mark().
 694   // Without the cast to int32_t, this style of movptr would destroy r10, which typically holds obj.
 695   movptr(Address(boxReg, 0), checked_cast<int32_t>(markWord::unused_mark().value()));
 696 
 697   // It's inflated and we use scrReg for ObjectMonitor* in this section.
 698   movq(scrReg, tmpReg);
 699   xorq(tmpReg, tmpReg);
 700   movptr(boxReg, Address(r15_thread, JavaThread::lock_id_offset()));
 701   lock();
 702   cmpxchgptr(boxReg, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 703 
 704   // Propagate ICC.ZF from CAS above into DONE_LABEL.
 705   jccb(Assembler::equal, DONE_LABEL);    // CAS above succeeded; propagate ZF = 1 (success)
 706 
 707   cmpptr(boxReg, rax);                // Check if we are already the owner (recursive lock)
 708   jccb(Assembler::notEqual, DONE_LABEL);    // If not recursive, ZF = 0 at this point (fail)
 709   incq(Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
 710   xorq(rax, rax); // Set ZF = 1 (success) for recursive lock, denoting locking success
 711   jmp(DONE_LABEL);
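  // A hedged C-style model of the lock-id based acquire above (plain fields;
  // '_lock_id' is assumed from JavaThread::lock_id_offset()):
  //
  //   intptr_t id   = r15_thread->_lock_id;
  //   intptr_t prev = cmpxchg(&m->_owner, (intptr_t)0, id);
  //   if (prev == 0)       { /* ZF == 1: acquired                */ }
  //   else if (prev == id) { m->_recursions++; /* ZF forced to 1 */ }
  //   else                 { /* ZF == 0: slow path               */ }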
 712 #endif // _LP64
 713 #if INCLUDE_RTM_OPT
 714   } // use_rtm()
 715 #endif
 716 
 717   bind(COUNT);
 718   // Count monitors in fast path
 719   increment(Address(thread, JavaThread::held_monitor_count_offset()));

 720   xorl(tmpReg, tmpReg); // Set ZF == 1
 721 
 722   bind(DONE_LABEL);
 723 
 724   // At DONE_LABEL the icc ZFlag is set as follows ...
 725   // fast_unlock uses the same protocol.
 726   // ZFlag == 1 -> Success
 727   // ZFlag == 0 -> Failure - force control through the slow path
 728 }
 729 
 730 // obj: object to unlock
 731 // box: box address (displaced header location), killed.  Must be EAX.
 732 // tmp: killed, cannot be obj nor box.
 733 //
 734 // Some commentary on balanced locking:
 735 //
 736 // fast_lock and fast_unlock are emitted only for provably balanced lock sites.
 737 // Methods that don't have provably balanced locking are forced to run in the
 738 // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
 739 // The interpreter provides two properties:
 740 // I1:  At return-time the interpreter automatically and quietly unlocks any
 741 //      objects acquired in the current activation (frame).  Recall that the
 742 //      interpreter maintains an on-stack list of locks currently held by
 743 //      a frame.
 744 // I2:  If a method attempts to unlock an object that is not held by
 745 //      the frame, the interpreter throws IMSX.
 746 //
 747 // Let's say A(), which has provably balanced locking, acquires O and then calls B().
 748 // B() doesn't have provably balanced locking so it runs in the interpreter.
 749 // Control returns to A() and A() unlocks O.  By I1 and I2, above, we know that O
 750 // is still locked by A().
 751 //
 752 // The only other source of unbalanced locking would be JNI.  The "Java Native Interface:
 753 // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
 754 // should not be unlocked by "normal" java-level locking and vice-versa.  The specification
 755 // doesn't say what will occur if a program engages in such mixed-mode locking, however.
 756 // Arguably, given that the spec leaves the JNI case undefined, our implementation
 757 // could reasonably *avoid* checking the owner in fast_unlock().
 758 // In the interest of performance we elide the m->Owner==Self check in unlock.
 759 // A perfectly viable alternative is to elide the owner check except when
 760 // Xcheck:jni is enabled.
 761 
 762 void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, Register scrReg, bool use_rtm) {
 763   assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
 764   assert(boxReg == rax, "");
 765   assert_different_registers(objReg, boxReg, tmpReg);
 766 
 767   Label DONE_LABEL, Stacked, COUNT;
 768 
 769 #if INCLUDE_RTM_OPT
 770   if (UseRTMForStackLocks && use_rtm) {
 771     assert(LockingMode != LM_MONITOR, "LockingMode == 0 (LM_MONITOR) and +UseRTMForStackLocks are mutually exclusive");
 772     Label L_regular_unlock;
 773     movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
 774     andptr(tmpReg, markWord::lock_mask_in_place);                     // look at 2 lock bits
 775     cmpptr(tmpReg, markWord::unlocked_value);                         // bits = 01 unlocked
 776     jccb(Assembler::notEqual, L_regular_unlock);                      // if !HLE RegularLock
 777     xend();                                                           // otherwise end...
 778     jmp(DONE_LABEL);                                                  // ... and we're done
 779     bind(L_regular_unlock);
 780   }
 781 #endif
 782 
 783   if (LockingMode == LM_LEGACY) {
 784     cmpptr(Address(boxReg, 0), NULL_WORD);                            // Examine the displaced header
 785     jcc   (Assembler::zero, DONE_LABEL);                              // 0 indicates recursive stack-lock
 786   }
 787   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));   // Examine the object's markword
 788   if (LockingMode != LM_MONITOR) {
 789     testptr(tmpReg, markWord::monitor_value);                         // Inflated?
 790     jcc(Assembler::zero, Stacked);
 791   }
 792 
 793   // It's inflated.
 794   // If the owner is ANONYMOUS, we need to fix it in an outline stub.
 795   cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t) ObjectMonitor::ANONYMOUS_OWNER);
 796 #ifdef _LP64
 797   if (!Compile::current()->output()->in_scratch_emit_size()) {
 798     C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmpReg, boxReg);
 799     Compile::current()->output()->add_stub(stub);
 800     jcc(Assembler::equal, stub->entry());
 801     bind(stub->continuation());
 802   } else
 803 #endif
 804   {
 805     // We can't easily implement this optimization on 32 bit because we don't have a thread register.
 806     // Call the slow-path instead.
 807     jcc(Assembler::notEqual, DONE_LABEL);
 808   }
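  // Roughly, and hedged (the stub body is emitted out of line and is not
  // shown here; its behavior is assumed):
  //
  //   if (m->_owner == ObjectMonitor::ANONYMOUS_OWNER) {
  //     // 64-bit: C2HandleAnonOMOwnerStub claims the monitor for the
  //     // current thread, then control resumes at the normal exit protocol.
  //     // 32-bit: no thread register, so control goes to the slow path.
  //   }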
 809 
 810 #if INCLUDE_RTM_OPT
 811   if (use_rtm) {
 812     Label L_regular_inflated_unlock;
 813     int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
 814     movptr(boxReg, Address(tmpReg, owner_offset));
 815     testptr(boxReg, boxReg);
 816     jccb(Assembler::notZero, L_regular_inflated_unlock);
 817     xend();
 818     jmp(DONE_LABEL);
 819     bind(L_regular_inflated_unlock);
 820   }
 821 #endif
 822 
 823   // Despite our balanced locking property we still check that m->_owner == Self
 824   // as java routines or native JNI code called by this thread might
 825   // have released the lock.
 826   // Refer to the comments in synchronizer.cpp for how we might encode extra
 827   // state in _succ so we can avoid fetching EntryList|cxq.
 828   //

 843   // the number of loads below (currently 4) to just 2 or 3.
 844   // Refer to the comments in synchronizer.cpp.
 845   // In practice the chain of fetches doesn't seem to impact performance, however.
 846   xorptr(boxReg, boxReg);
 847   orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
 848   jccb  (Assembler::notZero, DONE_LABEL);
 849   movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
 850   orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
 851   jccb  (Assembler::notZero, DONE_LABEL);
 852   movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
 853   jmpb  (DONE_LABEL);
 854 #else // _LP64
 855   // It's inflated
 856   Label CheckSucc, LNotRecursive, LSuccess, LGoSlowPath;
 857 
 858   cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 0);
 859   jccb(Assembler::equal, LNotRecursive);
 860 
 861   // Recursive inflated unlock
 862   decq(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
 863   xorl(tmpReg, tmpReg); // Set ZF == 1
 864   jmp(DONE_LABEL);
 865 
 866   bind(LNotRecursive);
 867 
 868   movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
 869   orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
 870   jccb  (Assembler::notZero, CheckSucc);
 871   // Without the cast to int32_t, this style of movptr would destroy r10, which typically holds obj.
 872   movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
 873   jmpb  (DONE_LABEL);
 874 
 875   // Try to avoid passing control into the slow_path ...
 876   bind  (CheckSucc);
 877 
 878   // The following optional optimization can be elided if necessary
 879   // Effectively: if (succ == null) goto slow path
 880   // The code reduces the window for a race, however,
 881   // and thus benefits performance.
 882   cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
 883   jccb  (Assembler::zero, LGoSlowPath);
 884 
 885   xorptr(boxReg, boxReg);
 886   // Without the cast to int32_t, this style of movptr would destroy r10, which typically holds obj.
 887   movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);

 895   // (mov box,0; xchgq box, &m->Owner; LD _succ) .
 896   lock(); addl(Address(rsp, 0), 0);
 897 
 898   cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
 899   jccb  (Assembler::notZero, LSuccess);
 900 
 901   // Rare inopportune interleaving - race.
 902   // The successor vanished in the small window above.
 903   // The lock is contended -- (cxq|EntryList) != null -- and there's no apparent successor.
 904   // We need to ensure progress and succession.
 905   // Try to reacquire the lock.
 906   // If that fails then the new owner is responsible for succession and this
 907   // thread needs to take no further action and can exit via the fast path (success).
 908   // If the re-acquire succeeds then pass control into the slow path.
 909   // As implemented, this latter mode is horrible because we generate more
 910   // coherence traffic on the lock *and* artificially extend the critical section
 911   // length by virtue of passing control into the slow path.
 912 
 913   // box is really RAX -- the following CMPXCHG depends on that binding
 914   // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
 915   movptr(scrReg, Address(r15_thread, JavaThread::lock_id_offset()));
 916   lock();
 917   cmpxchgptr(scrReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 918   // There's no successor so we tried to regrab the lock.
 919   // If that didn't work, then another thread grabbed the
 920   // lock so we're done (and exit was a success).
 921   jccb  (Assembler::notEqual, LSuccess);
 922   // Intentional fall-through into slow path
 923 
 924   bind  (LGoSlowPath);
 925   orl   (boxReg, 1);                      // set ICC.ZF=0 to indicate failure
 926   jmpb  (DONE_LABEL);
 927 
 928   bind  (LSuccess);
 929   testl (boxReg, 0);                      // set ICC.ZF=1 to indicate success
 930   jmpb  (DONE_LABEL);
 931 
 932 #endif
 933   if (LockingMode == LM_LEGACY) {
 934     bind  (Stacked);
 935     movptr(tmpReg, Address (boxReg, 0));      // re-fetch
 936     lock();
 937     cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
 938     jccb(Assembler::notZero, DONE_LABEL);
 939     // Count monitors in fast path
 940 #ifndef _LP64
 941     get_thread(tmpReg);
 942     decrementl(Address(tmpReg, JavaThread::held_monitor_count_offset()));
 943 #else // _LP64
 944     decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 945 #endif
 946     xorl(tmpReg, tmpReg); // Set ZF == 1
 947   }
 948 
 949   // ZFlag == 1 -> Success
 950   // ZFlag == 0 -> Failure - force control through the slow path
 951   bind(DONE_LABEL);
 952 }
 953 
 954 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register rax_reg,
 955                                               Register t, Register thread) {
 956   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 957   assert(rax_reg == rax, "Used for CAS");
 958   assert_different_registers(obj, box, rax_reg, t, thread);
 959 
 960   // Handle inflated monitor.
 961   Label inflated;
 962   // Finish fast lock successfully.
 963   Label locked;
 964   // Finish fast lock unsuccessfully. MUST jump with ZF == 0
 965   Label slow_path;
 966 
 967   if (DiagnoseSyncOnValueBasedClasses != 0) {
 968     load_klass(rax_reg, obj, t);
 969     movl(rax_reg, Address(rax_reg, Klass::access_flags_offset()));
 970     testl(rax_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
 971     jcc(Assembler::notZero, slow_path);
 972   }
 973 
 974   const Register mark = t;
 975 
 976   { // Lightweight Lock
 977 
 978     Label push;
 979 
 980     const Register top = box;
 981 
 982     // Load the mark.

 991 
 992     // Check if lock-stack is full.
 993     cmpl(top, LockStack::end_offset() - 1);
 994     jcc(Assembler::greater, slow_path);
 995 
 996     // Check if recursive.
 997     cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
 998     jccb(Assembler::equal, push);
 999 
1000     // Try to lock. Transition lock bits 0b01 => 0b00
1001     movptr(rax_reg, mark);
1002     orptr(rax_reg, markWord::unlocked_value);
1003     andptr(mark, ~(int32_t)markWord::unlocked_value);
1004     lock(); cmpxchgptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
1005     jcc(Assembler::notEqual, slow_path);
1006 
1007     bind(push);
1008     // After successful lock, push object on lock-stack.
1009     movptr(Address(thread, top), obj);
1010     addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
1011     xorl(rax_reg, rax_reg);
1012     jmpb(locked);
1013   }
1014 
1015   { // Handle inflated monitor.
1016     bind(inflated);
1017 
1018     const Register tagged_monitor = mark;
1019 
1020     // CAS owner (null => current thread).
1021     xorptr(rax_reg, rax_reg);
1022     movptr(box, Address(thread, JavaThread::lock_id_offset()));
1023     lock(); cmpxchgptr(box, Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1024     jccb(Assembler::equal, locked);
1025 
1026     // Check if recursive.
1027     cmpptr(box, rax_reg);
1028     jccb(Assembler::notEqual, slow_path);
1029 
1030     // Recursive.
1031     increment(Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
1032     xorl(rax_reg, rax_reg);
1033   }
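  // A hedged C-style model of the inflated path above; 'box' temporarily
  // holds the current thread's lock id ('_lock_id' field name assumed):
  //
  //   rax = 0;                                    // expected value: unowned
  //   prev = cmpxchg(&m->_owner, 0, thread->_lock_id);
  //   if (prev == 0)                     { /* ZF == 1: locked      */ }
  //   else if (prev == thread->_lock_id) { m->_recursions++; rax = 0; }
  //   else                               { /* ZF == 0: slow path   */ }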
1034 
1035   bind(locked);
1036 #ifdef ASSERT
1037   // Check that locked label is reached with ZF set.
1038   Label zf_correct;
1039   Label zf_bad_zero;
1040   jcc(Assembler::zero, zf_correct);
1041   jmp(zf_bad_zero);
1042 #endif
1043 
1044   bind(slow_path);
1045 #ifdef ASSERT
1046   // Check that slow_path label is reached with ZF not set.
1047   jcc(Assembler::notZero, zf_correct);
1048   stop("Fast Lock ZF != 0");
1049   bind(zf_bad_zero);
1050   stop("Fast Lock ZF != 1");
1051   bind(zf_correct);
1052 #endif
1053   // C2 uses the value of ZF to determine the continuation.
1054 }
1055 
1056 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, Register t1, Register t2, Register thread) {
1057   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1058   assert(reg_rax == rax, "Used for CAS");
1059   assert_different_registers(obj, reg_rax, t1, t2);
1060 
1061   // Handle inflated monitor.
1062   Label inflated, inflated_check_lock_stack;
1063   // Finish fast unlock successfully.  MUST jump with ZF == 1
1064   Label unlocked;
1065 
1066   const Register mark = t1;
1067   const Register top = reg_rax;
1068 
1069   Label dummy;
1070   C2FastUnlockLightweightStub* stub = nullptr;
1071 
1072   if (!Compile::current()->output()->in_scratch_emit_size()) {
1073     stub = new (Compile::current()->comp_arena()) C2FastUnlockLightweightStub(obj, mark, reg_rax, t2, thread);
1074     Compile::current()->output()->add_stub(stub);
1075   }
1076 
1077   Label& push_and_slow_path = stub == nullptr ? dummy : stub->push_and_slow_path();
1078   Label& check_successor = stub == nullptr ? dummy : stub->check_successor();
1079 
1080   { // Lightweight Unlock
1081 
1082     // Load top.
1083     movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
1084 
1085     // Prefetch mark.
1086     movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
1087 
1088     // Check if obj is top of lock-stack.
1089     cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
1090     // Top of lock stack was not obj. Must be monitor.
1091     jcc(Assembler::notEqual, inflated_check_lock_stack);
1092 
1093     // Pop lock-stack.

1146     movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
1147 #else // _LP64
1148     Label recursive;
1149 
1150     // Check if recursive.
1151     cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 0);
1152     jccb(Assembler::notEqual, recursive);
1153 
1154     // Check if the entry lists are empty.
1155     movptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
1156     orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
1157     jcc(Assembler::notZero, check_successor);
1158 
1159     // Release lock.
1160     movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
1161     jmpb(unlocked);
1162 
1163     // Recursive unlock.
1164     bind(recursive);
1165     decrement(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
1166     xorl(t1, t1);
1167 #endif
1168   }
1169 
1170   bind(unlocked);
1171   if (stub != nullptr) {
1172     bind(stub->unlocked_continuation());
1173   }
1174 
1175 #ifdef ASSERT
1176   // Check that unlocked label is reached with ZF set.
1177   Label zf_correct;
1178   jcc(Assembler::zero, zf_correct);
1179   stop("Fast Unlock ZF != 1");
1180 #endif
1181 
1182   if (stub != nullptr) {
1183     bind(stub->slow_path_continuation());
1184   }
1185 #ifdef ASSERT
1186   // Check that stub->continuation() label is reached with ZF not set.