< prev index next >

src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp

Print this page

  28 #include "asm/assembler.inline.hpp"
  29 #include "opto/c2_MacroAssembler.hpp"
  30 #include "opto/compile.hpp"
  31 #include "opto/intrinsicnode.hpp"
  32 #include "opto/output.hpp"
  33 #include "opto/subnode.hpp"
  34 #include "runtime/stubRoutines.hpp"
  35 #include "utilities/globalDefinitions.hpp"
  36 
  37 #ifdef PRODUCT
  38 #define BLOCK_COMMENT(str) /* nothing */
  39 #define STOP(error) stop(error)
  40 #else
  41 #define BLOCK_COMMENT(str) block_comment(str)
  42 #define STOP(error) block_comment(error); stop(error)
  43 #endif
  44 
  45 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  46 
  47 void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
  48                                   Register tmp1Reg, Register tmp2Reg, Register tmp3Reg) {
  49   // Use flag register (t1) to indicate the fast_lock result: zero for success; non-zero for failure.
  50   Register flag = t1;
  51   Register oop = objectReg;
  52   Register box = boxReg;
  53   Register disp_hdr = tmp1Reg;
  54   Register tmp = tmp2Reg;
  55   Label object_has_monitor;
  56   // Finish fast lock successfully. MUST branch to this label with flag == 0.
  57   Label locked;
  58   // Finish fast lock unsuccessfully. slow_path MUST be branched to with flag != 0.
  59   Label slow_path;
  60 
  61   assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
  62   assert_different_registers(oop, box, tmp, disp_hdr, flag, tmp3Reg, t0);
  63 
  64   mv(flag, 1);
  65 
  66   // Load markWord from object into displaced_header.
  67   ld(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
  68 

  87     // Initialize the box. (Must happen before we update the object mark!)
  88     sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
  89 
  90     // Compare object markWord with an unlocked value (tmp) and if
  91     // equal exchange the stack address of our box with object markWord.
  92     // On failure disp_hdr contains the possibly locked markWord.
  93     cmpxchg(/*memory address*/oop, /*expected value*/tmp, /*new value*/box, Assembler::int64,
  94             Assembler::aq, Assembler::rl, /*result*/disp_hdr);
  95     beq(disp_hdr, tmp, locked);
  96 
  97     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  98 
  99     // If the compare-and-exchange succeeded, then we found an unlocked
 100     // object, have now locked it, and will continue at label locked.
 101     // Otherwise we did not see an unlocked object, so try the fast recursive case.
 102 
 103     // Check if the owner is self by comparing the value in the
 104     // markWord of object (disp_hdr) with the stack pointer.
 105     sub(disp_hdr, disp_hdr, sp);
 106     mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
 107     // If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto label locked,
 108     // hence we can store 0 as the displaced header in the box, which indicates that it is a
 109     // recursive lock.
 110     andr(tmp/*==0?*/, disp_hdr, tmp);
 111     sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 112     beqz(tmp, locked);
 113     j(slow_path);
 114   }
 115 
 116   // Handle existing monitor.
 117   bind(object_has_monitor);

 118   // The object's monitor m is unlocked iff m->owner == nullptr,
 119   // otherwise m->owner may contain a thread or a stack address.

 120   //
 121   // Try to CAS m->owner from null to current thread.
 122   add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value));
 123   cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/xthread, Assembler::int64,


 124           Assembler::aq, Assembler::rl, /*result*/tmp3Reg); // cas succeeds if tmp3Reg == zr(expected)
 125 
 126   // Store a non-null value into the box to avoid looking like a re-entrant
 127   // lock. The fast-path monitor unlock code checks for
 128   // markWord::monitor_value so use markWord::unused_mark which has the
 129   // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
 130   mv(tmp, (address)markWord::unused_mark().value());
 131   sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 132 
 133   beqz(tmp3Reg, locked); // CAS success means locking succeeded
 134 
 135   bne(tmp3Reg, xthread, slow_path); // Check for recursive locking
 136 
 137   // Recursive lock case
 138   increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1, tmp2Reg, tmp3Reg);
 139 
 140   bind(locked);
 141   mv(flag, zr);
 142   increment(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2Reg, tmp3Reg);


 143 
 144 #ifdef ASSERT
 145   // Check that locked label is reached with flag == 0.
 146   Label flag_correct;
 147   beqz(flag, flag_correct);
 148   stop("Fast Lock Flag != 0");
 149 #endif
 150 
 151   bind(slow_path);
 152 #ifdef ASSERT
 153   // Check that slow_path label is reached with flag != 0.
 154   bnez(flag, flag_correct);
 155   stop("Fast Lock Flag == 0");
 156   bind(flag_correct);
 157 #endif
 158   // C2 uses the value of flag (0 vs !0) to determine the continuation.
 159 }
 160 
 161 void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
 162                                     Register tmp1Reg, Register tmp2Reg) {

 236 
 237   // Check if the entry lists are empty.
 238   ld(t0, Address(tmp, ObjectMonitor::EntryList_offset()));
 239   ld(tmp1Reg, Address(tmp, ObjectMonitor::cxq_offset()));
 240   orr(t0, t0, tmp1Reg);
 241   beqz(t0, unlocked); // If so we are done.
 242 
 243   // Check if there is a successor.
 244   ld(t0, Address(tmp, ObjectMonitor::succ_offset()));
 245   bnez(t0, unlocked); // If so we are done.
 246 
 247   // Save the monitor pointer in the current thread, so we can try to
 248   // reacquire the lock in SharedRuntime::monitor_exit_helper().
 249   sd(tmp, Address(xthread, JavaThread::unlocked_inflated_monitor_offset()));
 250 
 251   mv(flag, 1);
 252   j(slow_path);
 253 
 254   bind(unlocked);
 255   mv(flag, zr);
 256   decrement(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp1Reg, tmp2Reg);


 257 
 258 #ifdef ASSERT
 259   // Check that unlocked label is reached with flag == 0.
 260   Label flag_correct;
 261   beqz(flag, flag_correct);
 262   stop("Fast Lock Flag != 0");
 263 #endif
 264 
 265   bind(slow_path);
 266 #ifdef ASSERT
 267   // Check that slow_path label is reached with flag != 0.
 268   bnez(flag, flag_correct);
 269   stop("Fast Lock Flag == 0");
 270   bind(flag_correct);
 271 #endif
 272   // C2 uses the value of flag (0 vs !0) to determine the continuation.
 273 }
 274 
 275 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
 276                                               Register tmp1, Register tmp2, Register tmp3) {
 277   // Flag register, zero for success; non-zero for failure.
 278   Register flag = t1;
 279 
 280   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 281   assert_different_registers(obj, box, tmp1, tmp2, tmp3, flag, t0);
 282 
 283   mv(flag, 1);
 284 
 285   // Handle inflated monitor.
 286   Label inflated;
  287   // Finish fast lock successfully. MUST branch to this label with flag == 0.
 288   Label locked;
  289   // Finish fast lock unsuccessfully. slow_path MUST be branched to with flag != 0.
 290   Label slow_path;
 291 
 292   if (UseObjectMonitorTable) {
 293     // Clear cache in case fast locking succeeds.
 294     sd(zr, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
 295   }
 296 
 297   if (DiagnoseSyncOnValueBasedClasses != 0) {
 298     load_klass(tmp1, obj);
 299     lbu(tmp1, Address(tmp1, Klass::misc_flags_offset()));
 300     test_bit(tmp1, tmp1, exact_log2(KlassFlags::_misc_is_value_based_class));
 301     bnez(tmp1, slow_path);

 332     // Try to lock. Transition lock-bits 0b01 => 0b00
 333     ori(tmp1_mark, tmp1_mark, markWord::unlocked_value);
 334     xori(tmp3_t, tmp1_mark, markWord::unlocked_value);
 335     cmpxchg(/*addr*/ obj, /*expected*/ tmp1_mark, /*new*/ tmp3_t, Assembler::int64,
 336             /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_t);
 337     bne(tmp1_mark, tmp3_t, slow_path);
 338 
 339     bind(push);
 340     // After successful lock, push object on lock-stack.
 341     add(tmp3_t, xthread, tmp2_top);
 342     sd(obj, Address(tmp3_t));
 343     addw(tmp2_top, tmp2_top, oopSize);
 344     sw(tmp2_top, Address(xthread, JavaThread::lock_stack_top_offset()));
 345     j(locked);
 346   }
 347 
 348   { // Handle inflated monitor.
 349     bind(inflated);
 350 
 351     const Register tmp1_monitor = tmp1;

 352     if (!UseObjectMonitorTable) {
 353       assert(tmp1_monitor == tmp1_mark, "should be the same here");
 354     } else {
 355       Label monitor_found;
 356 
 357       // Load cache address
 358       la(tmp3_t, Address(xthread, JavaThread::om_cache_oops_offset()));
 359 
 360       const int num_unrolled = 2;
 361       for (int i = 0; i < num_unrolled; i++) {
 362         ld(tmp1, Address(tmp3_t));
 363         beq(obj, tmp1, monitor_found);
 364         add(tmp3_t, tmp3_t, in_bytes(OMCache::oop_to_oop_difference()));
 365       }
 366 
 367       Label loop;
 368 
 369       // Search for obj in cache.
 370       bind(loop);
 371 

 378       bnez(tmp1, loop);
 379       // Cache Miss. Take the slowpath.
 380       j(slow_path);
 381 
 382       bind(monitor_found);
 383       ld(tmp1_monitor, Address(tmp3_t, OMCache::oop_to_monitor_difference()));
 384     }
 385 
 386     const Register tmp2_owner_addr = tmp2;
 387     const Register tmp3_owner = tmp3;
 388 
 389     const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast<int>(markWord::monitor_value));
 390     const Address owner_address(tmp1_monitor, ObjectMonitor::owner_offset() - monitor_tag);
 391     const Address recursions_address(tmp1_monitor, ObjectMonitor::recursions_offset() - monitor_tag);
 392 
 393     Label monitor_locked;
 394 
 395     // Compute owner address.
 396     la(tmp2_owner_addr, owner_address);
 397 
 398     // CAS owner (null => current thread).
 399     cmpxchg(/*addr*/ tmp2_owner_addr, /*expected*/ zr, /*new*/ xthread, Assembler::int64,


 400             /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_owner);
 401     beqz(tmp3_owner, monitor_locked);
 402 
 403     // Check if recursive.
 404     bne(tmp3_owner, xthread, slow_path);
 405 
 406     // Recursive.
 407     increment(recursions_address, 1, tmp2, tmp3);
 408 
 409     bind(monitor_locked);
 410     if (UseObjectMonitorTable) {
 411       sd(tmp1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
 412     }
 413   }
 414 
 415   bind(locked);
 416   mv(flag, zr);
 417   increment(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2, tmp3);
 418 
 419 #ifdef ASSERT
 420   // Check that locked label is reached with flag == 0.
 421   Label flag_correct;
 422   beqz(flag, flag_correct);
 423   stop("Fast Lock Flag != 0");
 424 #endif
 425 
 426   bind(slow_path);
 427 #ifdef ASSERT
 428   // Check that slow_path label is reached with flag != 0.
 429   bnez(flag, flag_correct);
 430   stop("Fast Lock Flag == 0");
 431   bind(flag_correct);
 432 #endif
 433   // C2 uses the value of flag (0 vs !0) to determine the continuation.
 434 }
 435 
 436 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box,
 437                                                 Register tmp1, Register tmp2, Register tmp3) {

 569     // Check if the entry lists are empty.
 570     ld(t0, Address(tmp1_monitor, ObjectMonitor::EntryList_offset()));
 571     ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset()));
 572     orr(t0, t0, tmp3_t);
 573     beqz(t0, unlocked); // If so we are done.
 574 
 575     // Check if there is a successor.
 576     ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::succ_offset()));
 577     bnez(tmp3_t, unlocked); // If so we are done.
 578 
 579     // Save the monitor pointer in the current thread, so we can try
 580     // to reacquire the lock in SharedRuntime::monitor_exit_helper().
 581     sd(tmp1_monitor, Address(xthread, JavaThread::unlocked_inflated_monitor_offset()));
 582 
 583     mv(flag, 1);
 584     j(slow_path);
 585   }
 586 
 587   bind(unlocked);
 588   mv(flag, zr);
 589   decrement(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2, tmp3);
 590 
 591 #ifdef ASSERT
 592   // Check that unlocked label is reached with flag == 0.
 593   Label flag_correct;
 594   beqz(flag, flag_correct);
 595   stop("Fast Lock Flag != 0");
 596 #endif
 597 
 598   bind(slow_path);
 599 #ifdef ASSERT
 600   // Check that slow_path label is reached with flag != 0.
 601   bnez(flag, flag_correct);
 602   stop("Fast Lock Flag == 0");
 603   bind(flag_correct);
 604 #endif
 605   // C2 uses the value of flag (0 vs !0) to determine the continuation.
 606 }
 607 
 608 // short string
 609 // StringUTF16.indexOfChar

  28 #include "asm/assembler.inline.hpp"
  29 #include "opto/c2_MacroAssembler.hpp"
  30 #include "opto/compile.hpp"
  31 #include "opto/intrinsicnode.hpp"
  32 #include "opto/output.hpp"
  33 #include "opto/subnode.hpp"
  34 #include "runtime/stubRoutines.hpp"
  35 #include "utilities/globalDefinitions.hpp"
  36 
  37 #ifdef PRODUCT
  38 #define BLOCK_COMMENT(str) /* nothing */
  39 #define STOP(error) stop(error)
  40 #else
  41 #define BLOCK_COMMENT(str) block_comment(str)
  42 #define STOP(error) block_comment(error); stop(error)
  43 #endif
  44 
  45 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  46 
  47 void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
  48                                   Register tmp1Reg, Register tmp2Reg, Register tmp3Reg, Register tmp4Reg) {
  49   // Use flag register (t1) to indicate the fast_lock result: zero for success; non-zero for failure.
  50   Register flag = t1;
  51   Register oop = objectReg;
  52   Register box = boxReg;
  53   Register disp_hdr = tmp1Reg;
  54   Register tmp = tmp2Reg;
  55   Label object_has_monitor;
  56   // Finish fast lock successfully. MUST branch to this label with flag == 0.
  57   Label locked;
  58   // Finish fast lock unsuccessfully. slow_path MUST be branched to with flag != 0.
  59   Label slow_path;
  60 
  61   assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
  62   assert_different_registers(oop, box, tmp, disp_hdr, flag, tmp3Reg, t0);
  63 
  64   mv(flag, 1);
  65 
  66   // Load markWord from object into displaced_header.
  67   ld(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
  68 

  87     // Initialize the box. (Must happen before we update the object mark!)
  88     sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
  89 
  90     // Compare object markWord with an unlocked value (tmp) and if
  91     // equal exchange the stack address of our box with object markWord.
  92     // On failure disp_hdr contains the possibly locked markWord.
  93     cmpxchg(/*memory address*/oop, /*expected value*/tmp, /*new value*/box, Assembler::int64,
  94             Assembler::aq, Assembler::rl, /*result*/disp_hdr);
  95     beq(disp_hdr, tmp, locked);
  96 
  97     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  98 
  99     // If the compare-and-exchange succeeded, then we found an unlocked
 100     // object, have now locked it, and will continue at label locked.
 101     // Otherwise we did not see an unlocked object, so try the fast recursive case.
 102 
 103     // Check if the owner is self by comparing the value in the
 104     // markWord of object (disp_hdr) with the stack pointer.
 105     sub(disp_hdr, disp_hdr, sp);
 106     mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
 107     // If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto label
 108     // locked, hence we can store 0 as the displaced header in the box, which indicates that it
 109     // is a recursive lock.
 110     andr(tmp/*==0?*/, disp_hdr, tmp);
 111     sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 112     beqz(tmp, locked);
 113     j(slow_path);
 114   }
 115 
 116   // Handle existing monitor.
 117   bind(object_has_monitor);
 118 
 119   // The object's monitor m is unlocked iff m->owner == nullptr,
 120   // otherwise m->owner may contain a thread id, a stack address for LM_LEGACY,
 121   // the ANONYMOUS_OWNER constant for LM_LIGHTWEIGHT.
 122   //
 123   // Try to CAS m->owner from null to current thread id.
 124   add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value));
 125   Register tid = tmp4Reg;
 126   ld(tid, Address(xthread, JavaThread::lock_id_offset()));
 127   cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/tid, Assembler::int64,
 128           Assembler::aq, Assembler::rl, /*result*/tmp3Reg); // cas succeeds if tmp3Reg == zr(expected)
 129 
 130   // Store a non-null value into the box to avoid looking like a re-entrant
 131   // lock. The fast-path monitor unlock code checks for
 132   // markWord::monitor_value so use markWord::unused_mark which has the
 133   // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
 134   mv(tmp, (address)markWord::unused_mark().value());
 135   sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 136 
 137   beqz(tmp3Reg, locked); // CAS success means locking succeeded
 138 
 139   bne(tmp3Reg, tid, slow_path); // Check for recursive locking
 140 
 141   // Recursive lock case
 142   increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1, tmp2Reg, tmp3Reg);
 143 
 144   bind(locked);
 145   mv(flag, zr);
 146   if (LockingMode == LM_LEGACY) {
 147     inc_held_monitor_count();
 148   }
 149 
 150 #ifdef ASSERT
 151   // Check that locked label is reached with flag == 0.
 152   Label flag_correct;
 153   beqz(flag, flag_correct);
 154   stop("Fast Lock Flag != 0");
 155 #endif
 156 
 157   bind(slow_path);
 158 #ifdef ASSERT
 159   // Check that slow_path label is reached with flag != 0.
 160   bnez(flag, flag_correct);
 161   stop("Fast Lock Flag == 0");
 162   bind(flag_correct);
 163 #endif
 164   // C2 uses the value of flag (0 vs !0) to determine the continuation.
 165 }
 166 
 167 void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
 168                                     Register tmp1Reg, Register tmp2Reg) {

 242 
 243   // Check if the entry lists are empty.
 244   ld(t0, Address(tmp, ObjectMonitor::EntryList_offset()));
 245   ld(tmp1Reg, Address(tmp, ObjectMonitor::cxq_offset()));
 246   orr(t0, t0, tmp1Reg);
 247   beqz(t0, unlocked); // If so we are done.
 248 
 249   // Check if there is a successor.
 250   ld(t0, Address(tmp, ObjectMonitor::succ_offset()));
 251   bnez(t0, unlocked); // If so we are done.
 252 
 253   // Save the monitor pointer in the current thread, so we can try to
 254   // reacquire the lock in SharedRuntime::monitor_exit_helper().
 255   sd(tmp, Address(xthread, JavaThread::unlocked_inflated_monitor_offset()));
 256 
 257   mv(flag, 1);
 258   j(slow_path);
 259 
 260   bind(unlocked);
 261   mv(flag, zr);
 262   if (LockingMode == LM_LEGACY) {
 263     dec_held_monitor_count();
 264   }
 265 
 266 #ifdef ASSERT
 267   // Check that unlocked label is reached with flag == 0.
 268   Label flag_correct;
 269   beqz(flag, flag_correct);
 270   stop("Fast Lock Flag != 0");
 271 #endif
 272 
 273   bind(slow_path);
 274 #ifdef ASSERT
 275   // Check that slow_path label is reached with flag != 0.
 276   bnez(flag, flag_correct);
 277   stop("Fast Lock Flag == 0");
 278   bind(flag_correct);
 279 #endif
 280   // C2 uses the value of flag (0 vs !0) to determine the continuation.
 281 }
 282 
 283 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
 284                                               Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
 285   // Flag register, zero for success; non-zero for failure.
 286   Register flag = t1;
 287 
 288   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 289   assert_different_registers(obj, box, tmp1, tmp2, tmp3, tmp4, flag, t0);
 290 
 291   mv(flag, 1);
 292 
 293   // Handle inflated monitor.
 294   Label inflated;
  295   // Finish fast lock successfully. MUST branch to this label with flag == 0.
 296   Label locked;
  297   // Finish fast lock unsuccessfully. slow_path MUST be branched to with flag != 0.
 298   Label slow_path;
 299 
 300   if (UseObjectMonitorTable) {
 301     // Clear cache in case fast locking succeeds.
 302     sd(zr, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
 303   }
 304 
 305   if (DiagnoseSyncOnValueBasedClasses != 0) {
 306     load_klass(tmp1, obj);
 307     lbu(tmp1, Address(tmp1, Klass::misc_flags_offset()));
 308     test_bit(tmp1, tmp1, exact_log2(KlassFlags::_misc_is_value_based_class));
 309     bnez(tmp1, slow_path);

 340     // Try to lock. Transition lock-bits 0b01 => 0b00
 341     ori(tmp1_mark, tmp1_mark, markWord::unlocked_value);
 342     xori(tmp3_t, tmp1_mark, markWord::unlocked_value);
 343     cmpxchg(/*addr*/ obj, /*expected*/ tmp1_mark, /*new*/ tmp3_t, Assembler::int64,
 344             /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_t);
 345     bne(tmp1_mark, tmp3_t, slow_path);
 346 
 347     bind(push);
 348     // After successful lock, push object on lock-stack.
 349     add(tmp3_t, xthread, tmp2_top);
 350     sd(obj, Address(tmp3_t));
 351     addw(tmp2_top, tmp2_top, oopSize);
 352     sw(tmp2_top, Address(xthread, JavaThread::lock_stack_top_offset()));
 353     j(locked);
 354   }
 355 
 356   { // Handle inflated monitor.
 357     bind(inflated);
 358 
 359     const Register tmp1_monitor = tmp1;
 360 
 361     if (!UseObjectMonitorTable) {
 362       assert(tmp1_monitor == tmp1_mark, "should be the same here");
 363     } else {
 364       Label monitor_found;
 365 
 366       // Load cache address
 367       la(tmp3_t, Address(xthread, JavaThread::om_cache_oops_offset()));
 368 
 369       const int num_unrolled = 2;
 370       for (int i = 0; i < num_unrolled; i++) {
 371         ld(tmp1, Address(tmp3_t));
 372         beq(obj, tmp1, monitor_found);
 373         add(tmp3_t, tmp3_t, in_bytes(OMCache::oop_to_oop_difference()));
 374       }
 375 
 376       Label loop;
 377 
 378       // Search for obj in cache.
 379       bind(loop);
 380 

 387       bnez(tmp1, loop);
 388       // Cache Miss. Take the slowpath.
 389       j(slow_path);
 390 
 391       bind(monitor_found);
 392       ld(tmp1_monitor, Address(tmp3_t, OMCache::oop_to_monitor_difference()));
 393     }
 394 
 395     const Register tmp2_owner_addr = tmp2;
 396     const Register tmp3_owner = tmp3;
 397 
 398     const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast<int>(markWord::monitor_value));
 399     const Address owner_address(tmp1_monitor, ObjectMonitor::owner_offset() - monitor_tag);
 400     const Address recursions_address(tmp1_monitor, ObjectMonitor::recursions_offset() - monitor_tag);
 401 
 402     Label monitor_locked;
 403 
 404     // Compute owner address.
 405     la(tmp2_owner_addr, owner_address);
 406 
 407     // CAS owner (null => current thread id).
 408     Register tid = tmp4;
 409     ld(tid, Address(xthread, JavaThread::lock_id_offset()));
 410     cmpxchg(/*addr*/ tmp2_owner_addr, /*expected*/ zr, /*new*/ tid, Assembler::int64,
 411             /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_owner);
 412     beqz(tmp3_owner, monitor_locked);
 413 
 414     // Check if recursive.
 415     bne(tmp3_owner, tid, slow_path);
 416 
 417     // Recursive.
 418     increment(recursions_address, 1, tmp2, tmp3);
 419 
 420     bind(monitor_locked);
 421     if (UseObjectMonitorTable) {
 422       sd(tmp1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
 423     }
 424   }
 425 
 426   bind(locked);
 427   mv(flag, zr);

 428 
 429 #ifdef ASSERT
 430   // Check that locked label is reached with flag == 0.
 431   Label flag_correct;
 432   beqz(flag, flag_correct);
 433   stop("Fast Lock Flag != 0");
 434 #endif
 435 
 436   bind(slow_path);
 437 #ifdef ASSERT
 438   // Check that slow_path label is reached with flag != 0.
 439   bnez(flag, flag_correct);
 440   stop("Fast Lock Flag == 0");
 441   bind(flag_correct);
 442 #endif
 443   // C2 uses the value of flag (0 vs !0) to determine the continuation.
 444 }
 445 
 446 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box,
 447                                                 Register tmp1, Register tmp2, Register tmp3) {

 579     // Check if the entry lists are empty.
 580     ld(t0, Address(tmp1_monitor, ObjectMonitor::EntryList_offset()));
 581     ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset()));
 582     orr(t0, t0, tmp3_t);
 583     beqz(t0, unlocked); // If so we are done.
 584 
 585     // Check if there is a successor.
 586     ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::succ_offset()));
 587     bnez(tmp3_t, unlocked); // If so we are done.
 588 
 589     // Save the monitor pointer in the current thread, so we can try
 590     // to reacquire the lock in SharedRuntime::monitor_exit_helper().
 591     sd(tmp1_monitor, Address(xthread, JavaThread::unlocked_inflated_monitor_offset()));
 592 
 593     mv(flag, 1);
 594     j(slow_path);
 595   }
 596 
 597   bind(unlocked);
 598   mv(flag, zr);

 599 
 600 #ifdef ASSERT
 601   // Check that unlocked label is reached with flag == 0.
 602   Label flag_correct;
 603   beqz(flag, flag_correct);
 604   stop("Fast Lock Flag != 0");
 605 #endif
 606 
 607   bind(slow_path);
 608 #ifdef ASSERT
 609   // Check that slow_path label is reached with flag != 0.
 610   bnez(flag, flag_correct);
 611   stop("Fast Lock Flag == 0");
 612   bind(flag_correct);
 613 #endif
 614   // C2 uses the value of flag (0 vs !0) to determine the continuation.
 615 }
 616 
 617 // short string
 618 // StringUTF16.indexOfChar
< prev index next >