    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    sub(disp_hdr, disp_hdr, sp);
    mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
    // If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto label locked,
    // hence we can store 0 as the displaced header in the box, which indicates that it is a
    // recursive lock.
    andr(tmp/*==0?*/, disp_hdr, tmp);
    // Store unconditionally; the value is only meaningful (zero) on the
    // recursive stack-lock path taken by the beqz below.
    sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
    beqz(tmp, locked);
    j(slow_path);
  }

  // Handle existing monitor.
  bind(object_has_monitor);
  // The object's monitor m is unlocked iff m->owner == nullptr,
  // otherwise m->owner may contain a thread or a stack address.
  //
  // Try to CAS m->owner from null to current thread.
  // disp_hdr holds the tagged ObjectMonitor*; subtracting the tag yields the
  // address of the owner field.
  add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value));
  cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/xthread, Assembler::int64,
          Assembler::aq, Assembler::rl, /*result*/tmp3Reg); // cas succeeds if tmp3Reg == zr(expected)

  // Store a non-null value into the box to avoid looking like a re-entrant
  // lock. The fast-path monitor unlock code checks for
  // markWord::monitor_value so use markWord::unused_mark which has the
  // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
  mv(tmp, (address)markWord::unused_mark().value());
  sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

  beqz(tmp3Reg, locked); // CAS success means locking succeeded

  bne(tmp3Reg, xthread, slow_path); // Check for recursive locking

  // Recursive lock case: the owner is already the current thread, so just
  // bump the monitor's recursion count.
  increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1, tmp2Reg, tmp3Reg);

  bind(locked);
  mv(flag, zr);
  increment(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2Reg, tmp3Reg);

#ifdef ASSERT
  // Check that locked label is reached with flag == 0.
  Label flag_correct;
  beqz(flag, flag_correct);
  stop("Fast Lock Flag != 0");
#endif

  bind(slow_path);
#ifdef ASSERT
  // Check that slow_path label is reached with flag != 0.
  bnez(flag, flag_correct);
  stop("Fast Lock Flag == 0");
  bind(flag_correct);
#endif
  // C2 uses the value of flag (0 vs !0) to determine the continuation.
}
160
void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
                                    Register tmp1Reg, Register tmp2Reg) {
  // Use cr register to indicate the fast_unlock result: zero for success; non-zero for failure.
  Register flag = t1;
  Register oop = objectReg;
  Register box = boxReg;
  Register disp_hdr = tmp1Reg;
  Register tmp = tmp2Reg;
  Label object_has_monitor;
  // Finish fast unlock successfully. unlocked MUST be branched to with flag == 0
  Label unlocked;
  // Finish fast unlock unsuccessfully. slow_path MUST be branched to with flag != 0
  Label slow_path;

  assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
  assert_different_registers(oop, box, tmp, disp_hdr, flag, t0);

  // Pessimistically assume failure; every success path clears flag to zero.
  mv(flag, 1);

  if (LockingMode == LM_LEGACY) {
    // Find the lock address and load the displaced header from the stack.
    ld(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    beqz(disp_hdr, unlocked);
  }

  // Handle existing monitor.
  ld(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
  test_bit(t0, tmp, exact_log2(markWord::monitor_value));
    // After successful lock, push object on lock-stack.
    // tmp2_top holds the byte offset of the lock-stack top within the thread;
    // store obj there, then bump and publish the new top.
    add(tmp3_t, xthread, tmp2_top);
    sd(obj, Address(tmp3_t));
    addw(tmp2_top, tmp2_top, oopSize);
    sw(tmp2_top, Address(xthread, JavaThread::lock_stack_top_offset()));
    j(locked);
  }
322
  { // Handle inflated monitor.
    bind(inflated);

    // mark contains the tagged ObjectMonitor*.
    const Register tmp1_tagged_monitor = tmp1_mark;
    const uintptr_t monitor_tag = markWord::monitor_value;
    const Register tmp2_owner_addr = tmp2;
    const Register tmp3_owner = tmp3;

    // Compute owner address (untag the monitor pointer via the offset).
    la(tmp2_owner_addr, Address(tmp1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag)));

    // CAS owner (null => current thread).
    cmpxchg(/*addr*/ tmp2_owner_addr, /*expected*/ zr, /*new*/ xthread, Assembler::int64,
            /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_owner);
    beqz(tmp3_owner, locked);

    // Check if recursive: any owner other than the current thread means contention.
    bne(tmp3_owner, xthread, slow_path);

    // Recursive: we already own the monitor, bump the recursion count.
    increment(Address(tmp1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1, tmp2, tmp3);
  }

  bind(locked);
  mv(flag, zr);
  increment(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2, tmp3);

#ifdef ASSERT
  // Check that locked label is reached with flag == 0.
  Label flag_correct;
  beqz(flag, flag_correct);
  stop("Fast Lock Flag != 0");
#endif

  bind(slow_path);
#ifdef ASSERT
  // Check that slow_path label is reached with flag != 0.
  bnez(flag, flag_correct);
    // Recursive exit: store the recursion count back into the monitor and
    // finish as a successful unlock. (The count is presumably decremented
    // just above, outside this view — confirm in the full file.)
    sd(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset()));
    j(unlocked);

    bind(not_recursive);

    Label release;
    const Register tmp2_owner_addr = tmp2;

    // Compute owner address.
    la(tmp2_owner_addr, Address(tmp1_monitor, ObjectMonitor::owner_offset()));

    // Check if the entry lists are empty.
    ld(t0, Address(tmp1_monitor, ObjectMonitor::EntryList_offset()));
    ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset()));
    orr(t0, t0, tmp3_t);
    beqz(t0, release);

    // The owner may be anonymous and we removed the last obj entry in
    // the lock-stack. This loses the information about the owner.
    // Write the thread to the owner field so the runtime knows the owner.
    sd(xthread, Address(tmp2_owner_addr));
    j(slow_path);

    bind(release);
    // Set owner to null after making prior accesses to the monitor visible.
    membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
    sd(zr, Address(tmp2_owner_addr));
  }

  bind(unlocked);
  mv(flag, zr);
  decrement(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2, tmp3);

#ifdef ASSERT
  // Check that unlocked label is reached with flag == 0.
  Label flag_correct;
  beqz(flag, flag_correct);
  stop("Fast Lock Flag != 0");
#endif

  bind(slow_path);
|
101 // We did not see an unlocked object so try the fast recursive case.
102
103 // Check if the owner is self by comparing the value in the
104 // markWord of object (disp_hdr) with the stack pointer.
105 sub(disp_hdr, disp_hdr, sp);
106 mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
107 // If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto label locked,
108 // hence we can store 0 as the displaced header in the box, which indicates that it is a
109 // recursive lock.
110 andr(tmp/*==0?*/, disp_hdr, tmp);
111 sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
112 beqz(tmp, locked);
113 j(slow_path);
114 }
115
116 // Handle existing monitor.
117 bind(object_has_monitor);
118 // The object's monitor m is unlocked iff m->owner == nullptr,
119 // otherwise m->owner may contain a thread or a stack address.
120 //
121 // Try to CAS m->owner from null to current thread id.
122 Register tid = flag;
123 mv(tid, Address(xthread, JavaThread::lock_id_offset()));
124 add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value));
125 cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/tid, Assembler::int64,
126 Assembler::aq, Assembler::rl, /*result*/tmp3Reg); // cas succeeds if tmp3Reg == zr(expected)
127
128 // Store a non-null value into the box to avoid looking like a re-entrant
129 // lock. The fast-path monitor unlock code checks for
130 // markWord::monitor_value so use markWord::unused_mark which has the
131 // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
132 mv(tmp, (address)markWord::unused_mark().value());
133 sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
134
135 beqz(tmp3Reg, locked); // CAS success means locking succeeded
136
137 bne(tmp3Reg, tid, slow_path); // Check for recursive locking
138
139 // Recursive lock case
140 increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1, tmp2Reg, tmp3Reg);
141
142 bind(locked);
143 mv(flag, zr);
144 increment(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2Reg, tmp3Reg);
145
146 #ifdef ASSERT
147 // Check that locked label is reached with flag == 0.
148 Label flag_correct;
149 beqz(flag, flag_correct);
150 stop("Fast Lock Flag != 0");
151 #endif
152
153 bind(slow_path);
154 #ifdef ASSERT
155 // Check that slow_path label is reached with flag != 0.
156 bnez(flag, flag_correct);
157 stop("Fast Lock Flag == 0");
158 bind(flag_correct);
159 #endif
160 // C2 uses the value of flag (0 vs !0) to determine the continuation.
161 }
162
void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
                                    Register tmp1Reg, Register tmp2Reg) {
  // Use cr register to indicate the fast_unlock result: zero for success; non-zero for failure.
  Register flag = t1;
  Register oop = objectReg;
  Register box = boxReg;
  Register disp_hdr = tmp1Reg;
  Register tmp = tmp2Reg;
  Label object_has_monitor;
  // Finish fast unlock successfully. unlocked MUST be branched to with flag == 0
  Label unlocked;
  // Finish fast unlock unsuccessfully. slow_path MUST be branched to with flag != 0
  Label slow_path;

  assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
  assert_different_registers(oop, box, tmp, disp_hdr, flag, t0);

  // Pessimistically assume failure; every success path clears flag to zero.
  mv(flag, 1);

  if (LockingMode == LM_LEGACY) {
    // Find the lock address and load the displaced header from the stack.
    ld(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    beqz(disp_hdr, unlocked);
  }

  // Handle existing monitor.
  ld(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
  test_bit(t0, tmp, exact_log2(markWord::monitor_value));
    // After successful lock, push object on lock-stack.
    // tmp2_top holds the byte offset of the lock-stack top within the thread;
    // store obj there, then bump and publish the new top.
    add(tmp3_t, xthread, tmp2_top);
    sd(obj, Address(tmp3_t));
    addw(tmp2_top, tmp2_top, oopSize);
    sw(tmp2_top, Address(xthread, JavaThread::lock_stack_top_offset()));
    j(locked);
  }
324
325 { // Handle inflated monitor.
326 bind(inflated);
327
328 // mark contains the tagged ObjectMonitor*.
329 const Register tmp1_tagged_monitor = tmp1_mark;
330 const uintptr_t monitor_tag = markWord::monitor_value;
331 const Register tmp2_owner_addr = tmp2;
332 const Register tmp3_owner = tmp3;
333
334 // Compute owner address.
335 la(tmp2_owner_addr, Address(tmp1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag)));
336
337 // CAS owner (null => current thread id).
338 Register tid = flag;
339 mv(tid, Address(xthread, JavaThread::lock_id_offset()));
340 cmpxchg(/*addr*/ tmp2_owner_addr, /*expected*/ zr, /*new*/ tid, Assembler::int64,
341 /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_owner);
342 beqz(tmp3_owner, locked);
343
344 // Check if recursive.
345 bne(tmp3_owner, tid, slow_path);
346
347 // Recursive.
348 increment(Address(tmp1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1, tmp2, tmp3);
349 }
350
351 bind(locked);
352 mv(flag, zr);
353 increment(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2, tmp3);
354
355 #ifdef ASSERT
356 // Check that locked label is reached with flag == 0.
357 Label flag_correct;
358 beqz(flag, flag_correct);
359 stop("Fast Lock Flag != 0");
360 #endif
361
362 bind(slow_path);
363 #ifdef ASSERT
364 // Check that slow_path label is reached with flag != 0.
365 bnez(flag, flag_correct);
476 sd(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset()));
477 j(unlocked);
478
479 bind(not_recursive);
480
481 Label release;
482 const Register tmp2_owner_addr = tmp2;
483
484 // Compute owner address.
485 la(tmp2_owner_addr, Address(tmp1_monitor, ObjectMonitor::owner_offset()));
486
487 // Check if the entry lists are empty.
488 ld(t0, Address(tmp1_monitor, ObjectMonitor::EntryList_offset()));
489 ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset()));
490 orr(t0, t0, tmp3_t);
491 beqz(t0, release);
492
493 // The owner may be anonymous and we removed the last obj entry in
494 // the lock-stack. This loses the information about the owner.
495 // Write the thread to the owner field so the runtime knows the owner.
496 Register tid = flag;
497 mv(tid, Address(xthread, JavaThread::lock_id_offset()));
498 sd(tid, Address(tmp2_owner_addr));
499 j(slow_path);
500
501 bind(release);
502 // Set owner to null.
503 membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
504 sd(zr, Address(tmp2_owner_addr));
505 }
506
507 bind(unlocked);
508 mv(flag, zr);
509 decrement(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2, tmp3);
510
511 #ifdef ASSERT
512 // Check that unlocked label is reached with flag == 0.
513 Label flag_correct;
514 beqz(flag, flag_correct);
515 stop("Fast Lock Flag != 0");
516 #endif
517
518 bind(slow_path);
|