40 #else
41 #define BLOCK_COMMENT(str) block_comment(str)
42 #define STOP(error) block_comment(error); stop(error)
43 #endif
44
// BIND: bind the label and emit its name into the disassembly as a block
// comment (BLOCK_COMMENT is defined in the conditional block above).
45 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
46
// Pointer-to-member type for MacroAssembler load/store-style instructions
// taking (Register, Address). Its use site is not visible in this chunk —
// presumably used to parameterize character-width loads in the string
// intrinsics; confirm against the callers.
47 typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr);
48
// C2 fast-path monitor enter for LM_MONITOR / LM_LEGACY locking modes.
// Contract with C2-generated code: on exit the condition flags encode the
// outcome — EQ => lock acquired on the fast path, NE => take the slow path.
//   objectReg        - oop being locked
//   boxReg           - address of the on-stack BasicLock box
//   tmpReg/tmp2Reg   - temporaries (aliased below as disp_hdr / tmp)
//   tmp3Reg          - receives the old owner value from the cmpxchg
49 void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register tmpReg,
50 Register tmp2Reg, Register tmp3Reg) {
51 Register oop = objectReg;
52 Register box = boxReg;
53 Register disp_hdr = tmpReg;
54 Register tmp = tmp2Reg;
55 Label cont;
56 Label object_has_monitor;
57 Label count, no_count;
58
59 assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
60 assert_different_registers(oop, box, tmp, disp_hdr);
61
62 // Load markWord from object into displaced_header.
63 ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
64
// Value-based classes must not be synchronized on: branch NE (failure)
// to 'cont' so the runtime slow path can diagnose the attempt.
65 if (DiagnoseSyncOnValueBasedClasses != 0) {
66 load_klass(tmp, oop);
67 ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
68 tst(tmp, KlassFlags::_misc_is_value_based_class);
69 br(Assembler::NE, cont);
70 }
71
72 // Check for existing monitor
73 tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
74
75 if (LockingMode == LM_MONITOR) {
76 tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
77 b(cont);
78 } else {
79 assert(LockingMode == LM_LEGACY, "must be");
80 // Set tmp to be (markWord of object | UNLOCK_VALUE).
// NOTE(review): original file lines 81-92 are not visible in this chunk;
// they presumably perform the CAS installing the box into the object
// header — confirm against the full source before relying on this reading.
93 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
94
95 // If the compare-and-exchange succeeded, then we found an unlocked
96 // object, will have now locked it will continue at label cont
97
98 // Check if the owner is self by comparing the value in the
99 // markWord of object (disp_hdr) with the stack pointer.
100 mov(rscratch1, sp);
101 sub(disp_hdr, disp_hdr, rscratch1);
102 mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
103 // If condition is true we are cont and hence we can store 0 as the
104 // displaced header in the box, which indicates that it is a recursive lock.
105 ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
106 str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
107 b(cont);
108 }
109
110 // Handle existing monitor.
111 bind(object_has_monitor);
112
113 // The object's monitor m is unlocked iff m->owner == nullptr,
114 // otherwise m->owner may contain a thread or a stack address.
115 //
116 // Try to CAS m->owner from null to current thread.
117 add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
118 cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
119 /*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result
120
121 // Store a non-null value into the box to avoid looking like a re-entrant
122 // lock. The fast-path monitor unlock code checks for
123 // markWord::monitor_value so use markWord::unused_mark which has the
124 // relevant bit set, and also matches ObjectSynchronizer::enter.
125 mov(tmp, (address)markWord::unused_mark().value());
126 str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
127
128 br(Assembler::EQ, cont); // CAS success means locking succeeded
129
// CAS failed: tmp3Reg holds the observed owner. If it equals the current
// thread this is a re-entrant acquisition; otherwise another thread owns it.
130 cmp(tmp3Reg, rthread);
131 br(Assembler::NE, cont); // Check for recursive locking
132
133 // Recursive lock case
134 increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
135 // flag == EQ still from the cmp above, checking if this is a reentrant lock
136
137 bind(cont);
138 // flag == EQ indicates success
139 // flag == NE indicates failure
140 br(Assembler::NE, no_count);
141
// Successful lock: bump the per-thread held-monitor counter.
142 bind(count);
143 increment(Address(rthread, JavaThread::held_monitor_count_offset()));
144
145 bind(no_count);
146 }
147
// C2 fast-path monitor exit for LM_MONITOR / LM_LEGACY locking modes.
// Contract with C2-generated code: EQ on exit => unlocked on the fast path,
// NE => take the slow path.
// NOTE(review): owner_addr aliases disp_hdr — both name tmpReg.
148 void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Register tmpReg,
149 Register tmp2Reg) {
150 Register oop = objectReg;
151 Register box = boxReg;
152 Register disp_hdr = tmpReg;
153 Register owner_addr = tmpReg;
154 Register tmp = tmp2Reg;
155 Label cont;
156 Label object_has_monitor;
157 Label count, no_count;
158 Label unlocked;
159
160 assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
161 assert_different_registers(oop, box, tmp, disp_hdr);
162
163 if (LockingMode == LM_LEGACY) {
// NOTE(review): original file lines 164-229 are not visible in this chunk.
// They contain the legacy stack-unlock path and the inflated-monitor
// handling that precedes this tail (the branch below is reached from that
// elided code) — confirm against the full source.
230 br(Assembler::NE, unlocked); // If so we are done.
231
232 // Save the monitor pointer in the current thread, so we can try to
233 // reacquire the lock in SharedRuntime::monitor_exit_helper().
234 str(tmp, Address(rthread, JavaThread::unlocked_inflated_monitor_offset()));
235
236 cmp(zr, rthread); // Set Flag to NE => slow path
237 b(cont);
238
239 bind(unlocked);
240 cmp(zr, zr); // Set Flag to EQ => fast path
241
242 // Intentional fall-through
243
244 bind(cont);
245 // flag == EQ indicates success
246 // flag == NE indicates failure
247 br(Assembler::NE, no_count);
248
// Successful unlock: drop the per-thread held-monitor counter.
249 bind(count);
250 decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
251
252 bind(no_count);
253 }
254
// C2 fast-path monitor enter for LM_LIGHTWEIGHT locking mode.
// Contract with C2-generated code: MUST reach 'locked' with flags == EQ and
// 'slow_path' with flags == NE (checked under ASSERT at the bottom).
255 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register t1,
256 Register t2, Register t3) {
257 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
258 assert_different_registers(obj, box, t1, t2, t3);
259
260 // Handle inflated monitor.
261 Label inflated;
262 // Finish fast lock successfully. MUST branch to with flag == EQ
263 Label locked;
264 // Finish fast lock unsuccessfully. MUST branch to with flag == NE
265 Label slow_path;
266
267 if (UseObjectMonitorTable) {
268 // Clear cache in case fast locking succeeds.
269 str(zr, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
270 }
271
// Value-based classes must not be synchronized on: take the slow path (NE).
272 if (DiagnoseSyncOnValueBasedClasses != 0) {
273 load_klass(t1, obj);
274 ldrb(t1, Address(t1, Klass::misc_flags_offset()));
275 tst(t1, KlassFlags::_misc_is_value_based_class);
276 br(Assembler::NE, slow_path);
277 }
278
// NOTE(review): original file lines 279-353 are not visible in this chunk.
// They contain the lightweight fast-lock attempt and (when
// UseObjectMonitorTable) declare t1_monitor/t3_t and the loop/monitor_found
// labels used below — confirm against the full source.
354 increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
355 cbnz(t1, loop);
356 // Cache Miss, NE set from cmp above, cbnz does not set flags
357 b(slow_path);
358
359 bind(monitor_found);
360 ldr(t1_monitor, Address(t3_t, OMCache::oop_to_monitor_difference()));
361 }
362
// Inflated-monitor path: t1_monitor points at the ObjectMonitor, tagged
// with markWord::monitor_value unless it came from the monitor table.
363 const Register t2_owner_addr = t2;
364 const Register t3_owner = t3;
365 const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast<int>(markWord::monitor_value));
366 const Address owner_address(t1_monitor, ObjectMonitor::owner_offset() - monitor_tag);
367 const Address recursions_address(t1_monitor, ObjectMonitor::recursions_offset() - monitor_tag);
368
369 Label monitor_locked;
370
371 // Compute owner address.
372 lea(t2_owner_addr, owner_address);
373
374 // CAS owner (null => current thread).
375 cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true,
376 /*release*/ false, /*weak*/ false, t3_owner);
377 br(Assembler::EQ, monitor_locked);
378
// CAS failed: t3_owner holds the observed owner; equal to rthread means a
// re-entrant acquisition, anything else means contention => slow path.
379 // Check if recursive.
380 cmp(t3_owner, rthread);
381 br(Assembler::NE, slow_path);
382
383 // Recursive.
384 increment(recursions_address, 1);
385
386 bind(monitor_locked);
387 if (UseObjectMonitorTable) {
// Cache the monitor in the box so later ops can skip the table lookup.
388 str(t1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
389 }
390 }
391
392 bind(locked);
393 increment(Address(rthread, JavaThread::held_monitor_count_offset()));
394
395 #ifdef ASSERT
396 // Check that locked label is reached with Flags == EQ.
397 Label flag_correct;
398 br(Assembler::EQ, flag_correct);
399 stop("Fast Lock Flag != EQ");
400 #endif
401
402 bind(slow_path);
403 #ifdef ASSERT
404 // Check that slow_path label is reached with Flags == NE.
405 br(Assembler::NE, flag_correct);
406 stop("Fast Lock Flag != NE");
407 bind(flag_correct);
408 #endif
409 // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
410 }
411
// C2 fast-path monitor exit for LM_LIGHTWEIGHT locking mode.
// Contract with C2-generated code: 'unlocked' => EQ, 'slow_path' => NE
// (checked under ASSERT at the bottom).
412 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register t1,
413 Register t2, Register t3) {
// NOTE(review): original file lines 414-541 are not visible in this chunk.
// They contain the lightweight fast-unlock attempt and the start of the
// inflated-monitor release, and declare t1_monitor/t3_t and the
// unlocked/slow_path labels used below — confirm against the full source.
542 ldr(rscratch1, Address(t1_monitor, ObjectMonitor::EntryList_offset()));
543 ldr(t3_t, Address(t1_monitor, ObjectMonitor::cxq_offset()));
544 orr(rscratch1, rscratch1, t3_t);
// No waiters on EntryList or cxq => nobody to wake, fast path is done.
545 cmp(rscratch1, zr);
546 br(Assembler::EQ, unlocked); // If so we are done.
547
548 // Check if there is a successor.
549 ldr(rscratch1, Address(t1_monitor, ObjectMonitor::succ_offset()));
550 cmp(rscratch1, zr);
551 br(Assembler::NE, unlocked); // If so we are done.
552
553 // Save the monitor pointer in the current thread, so we can try to
554 // reacquire the lock in SharedRuntime::monitor_exit_helper().
555 str(t1_monitor, Address(rthread, JavaThread::unlocked_inflated_monitor_offset()));
556
557 cmp(zr, rthread); // Set Flag to NE => slow path
558 b(slow_path);
559 }
560
561 bind(unlocked);
562 decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
563 cmp(zr, zr); // Set Flags to EQ => fast path
564
565 #ifdef ASSERT
566 // Check that unlocked label is reached with Flags == EQ.
567 Label flag_correct;
568 br(Assembler::EQ, flag_correct);
569 stop("Fast Unlock Flag != EQ");
570 #endif
571
572 bind(slow_path);
573 #ifdef ASSERT
574 // Check that slow_path label is reached with Flags == NE.
575 br(Assembler::NE, flag_correct);
576 stop("Fast Unlock Flag != NE");
577 bind(flag_correct);
578 #endif
579 // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
580 }
581
582 // Search for str1 in str2 and return index or -1
|
40 #else
41 #define BLOCK_COMMENT(str) block_comment(str)
42 #define STOP(error) block_comment(error); stop(error)
43 #endif
44
// BIND: bind the label and emit its name into the disassembly as a block
// comment (BLOCK_COMMENT is defined in the conditional block above).
45 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
46
// Pointer-to-member type for MacroAssembler load/store-style instructions
// taking (Register, Address). Its use site is not visible in this chunk —
// presumably used to parameterize character-width loads in the string
// intrinsics; confirm against the callers.
47 typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr);
48
// C2 fast-path monitor enter for LM_MONITOR / LM_LEGACY locking modes.
// Contract with C2-generated code: on exit the condition flags encode the
// outcome — EQ => lock acquired on the fast path, NE => take the slow path.
// This version CASes the thread's _lock_id (not rthread itself) into the
// monitor owner field — presumably to give virtual threads a distinct
// owner identity; confirm against JavaThread::lock_id_offset() docs.
//   objectReg        - oop being locked
//   boxReg           - address of the on-stack BasicLock box
//   tmpReg/tmp2Reg   - temporaries (aliased below as disp_hdr / tmp)
//   tmp3Reg          - receives the old owner value from the cmpxchg
// Clobbers rscratch2 (holds the _lock_id), hence the extra assert below.
49 void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register tmpReg,
50 Register tmp2Reg, Register tmp3Reg) {
51 Register oop = objectReg;
52 Register box = boxReg;
53 Register disp_hdr = tmpReg;
54 Register tmp = tmp2Reg;
55 Label cont;
56 Label object_has_monitor;
57 Label count, no_count;
58
59 assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
60 assert_different_registers(oop, box, tmp, disp_hdr, rscratch2);
61
62 // Load markWord from object into displaced_header.
63 ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
64
// Value-based classes must not be synchronized on: branch NE (failure)
// to 'cont' so the runtime slow path can diagnose the attempt.
65 if (DiagnoseSyncOnValueBasedClasses != 0) {
66 load_klass(tmp, oop);
67 ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
68 tst(tmp, KlassFlags::_misc_is_value_based_class);
69 br(Assembler::NE, cont);
70 }
71
72 // Check for existing monitor
73 tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
74
75 if (LockingMode == LM_MONITOR) {
76 tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
77 b(cont);
78 } else {
79 assert(LockingMode == LM_LEGACY, "must be");
80 // Set tmp to be (markWord of object | UNLOCK_VALUE).
// NOTE(review): original file lines 81-92 are not visible in this chunk;
// they presumably perform the CAS installing the box into the object
// header — confirm against the full source before relying on this reading.
93 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
94
95 // If the compare-and-exchange succeeded, then we found an unlocked
96 // object, will have now locked it will continue at label cont
97
98 // Check if the owner is self by comparing the value in the
99 // markWord of object (disp_hdr) with the stack pointer.
100 mov(rscratch1, sp);
101 sub(disp_hdr, disp_hdr, rscratch1);
102 mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
103 // If condition is true we are cont and hence we can store 0 as the
104 // displaced header in the box, which indicates that it is a recursive lock.
105 ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
106 str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
107 b(cont);
108 }
109
110 // Handle existing monitor.
111 bind(object_has_monitor);
112
113 // Try to CAS owner (no owner => current thread's _lock_id).
114 ldr(rscratch2, Address(rthread, JavaThread::lock_id_offset()));
115 add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
116 cmpxchg(tmp, zr, rscratch2, Assembler::xword, /*acquire*/ true,
117 /*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result
118
119 // Store a non-null value into the box to avoid looking like a re-entrant
120 // lock. The fast-path monitor unlock code checks for
121 // markWord::monitor_value so use markWord::unused_mark which has the
122 // relevant bit set, and also matches ObjectSynchronizer::enter.
123 mov(tmp, (address)markWord::unused_mark().value());
124 str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
125
126 br(Assembler::EQ, cont); // CAS success means locking succeeded
127
// CAS failed: tmp3Reg holds the observed owner. Equal to our _lock_id
// means a re-entrant acquisition; anything else means contention.
128 cmp(tmp3Reg, rscratch2);
129 br(Assembler::NE, cont); // Check for recursive locking
130
131 // Recursive lock case
132 increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
133 // flag == EQ still from the cmp above, checking if this is a reentrant lock
134
135 bind(cont);
136 // flag == EQ indicates success
137 // flag == NE indicates failure
138 br(Assembler::NE, no_count);
139
140 bind(count);
// Only legacy stack locks maintain the held-monitor count here.
141 if (LockingMode == LM_LEGACY) {
142 inc_held_monitor_count(rscratch1);
143 }
144
145 bind(no_count);
146 }
147
// C2 fast-path monitor exit for LM_MONITOR / LM_LEGACY locking modes.
// Contract with C2-generated code: EQ on exit => unlocked on the fast path,
// NE => take the slow path.
// NOTE(review): owner_addr aliases disp_hdr — both name tmpReg.
148 void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Register tmpReg,
149 Register tmp2Reg) {
150 Register oop = objectReg;
151 Register box = boxReg;
152 Register disp_hdr = tmpReg;
153 Register owner_addr = tmpReg;
154 Register tmp = tmp2Reg;
155 Label cont;
156 Label object_has_monitor;
157 Label count, no_count;
158 Label unlocked;
159
160 assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
161 assert_different_registers(oop, box, tmp, disp_hdr);
162
163 if (LockingMode == LM_LEGACY) {
// NOTE(review): original file lines 164-229 are not visible in this chunk.
// They contain the legacy stack-unlock path and the inflated-monitor
// handling that precedes this tail (the branch below is reached from that
// elided code) — confirm against the full source.
230 br(Assembler::NE, unlocked); // If so we are done.
231
232 // Save the monitor pointer in the current thread, so we can try to
233 // reacquire the lock in SharedRuntime::monitor_exit_helper().
234 str(tmp, Address(rthread, JavaThread::unlocked_inflated_monitor_offset()));
235
236 cmp(zr, rthread); // Set Flag to NE => slow path
237 b(cont);
238
239 bind(unlocked);
240 cmp(zr, zr); // Set Flag to EQ => fast path
241
242 // Intentional fall-through
243
244 bind(cont);
245 // flag == EQ indicates success
246 // flag == NE indicates failure
247 br(Assembler::NE, no_count);
248
249 bind(count);
// Only legacy stack locks maintain the held-monitor count here.
250 if (LockingMode == LM_LEGACY) {
251 dec_held_monitor_count(rscratch1);
252 }
253
254 bind(no_count);
255 }
256
// C2 fast-path monitor enter for LM_LIGHTWEIGHT locking mode.
// Contract with C2-generated code: MUST reach 'locked' with flags == EQ and
// 'slow_path' with flags == NE (checked under ASSERT at the bottom).
// This version CASes the thread's _lock_id (not rthread) into the monitor
// owner field; rscratch2 is clobbered to hold it, hence the extra assert.
257 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register t1,
258 Register t2, Register t3) {
259 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
260 assert_different_registers(obj, box, t1, t2, t3, rscratch2);
261
262 // Handle inflated monitor.
263 Label inflated;
264 // Finish fast lock successfully. MUST branch to with flag == EQ
265 Label locked;
266 // Finish fast lock unsuccessfully. MUST branch to with flag == NE
267 Label slow_path;
268
269 if (UseObjectMonitorTable) {
270 // Clear cache in case fast locking succeeds.
271 str(zr, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
272 }
273
// Value-based classes must not be synchronized on: take the slow path (NE).
274 if (DiagnoseSyncOnValueBasedClasses != 0) {
275 load_klass(t1, obj);
276 ldrb(t1, Address(t1, Klass::misc_flags_offset()));
277 tst(t1, KlassFlags::_misc_is_value_based_class);
278 br(Assembler::NE, slow_path);
279 }
280
// NOTE(review): original file lines 280-355 are not visible in this chunk.
// They contain the lightweight fast-lock attempt and (when
// UseObjectMonitorTable) declare t1_monitor/t3_t and the loop/monitor_found
// labels used below — confirm against the full source.
356 increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
357 cbnz(t1, loop);
358 // Cache Miss, NE set from cmp above, cbnz does not set flags
359 b(slow_path);
360
361 bind(monitor_found);
362 ldr(t1_monitor, Address(t3_t, OMCache::oop_to_monitor_difference()));
363 }
364
// Inflated-monitor path: t1_monitor points at the ObjectMonitor, tagged
// with markWord::monitor_value unless it came from the monitor table.
365 const Register t2_owner_addr = t2;
366 const Register t3_owner = t3;
367 const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast<int>(markWord::monitor_value));
368 const Address owner_address(t1_monitor, ObjectMonitor::owner_offset() - monitor_tag);
369 const Address recursions_address(t1_monitor, ObjectMonitor::recursions_offset() - monitor_tag);
370
371 Label monitor_locked;
372
373 // Compute owner address.
374 lea(t2_owner_addr, owner_address);
375
376 // Try to CAS owner (no owner => current thread's _lock_id).
377 ldr(rscratch2, Address(rthread, JavaThread::lock_id_offset()));
378 cmpxchg(t2_owner_addr, zr, rscratch2, Assembler::xword, /*acquire*/ true,
379 /*release*/ false, /*weak*/ false, t3_owner);
380 br(Assembler::EQ, monitor_locked);
381
// CAS failed: t3_owner holds the observed owner; equal to our _lock_id
// means a re-entrant acquisition, anything else means contention.
382 // Check if recursive.
383 cmp(t3_owner, rscratch2);
384 br(Assembler::NE, slow_path);
385
386 // Recursive.
387 increment(recursions_address, 1);
388
389 bind(monitor_locked);
390 if (UseObjectMonitorTable) {
// Cache the monitor in the box so later ops can skip the table lookup.
391 str(t1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
392 }
393 }
394
// Note: unlike the legacy path, no held-monitor count update here.
395 bind(locked);
396
397 #ifdef ASSERT
398 // Check that locked label is reached with Flags == EQ.
399 Label flag_correct;
400 br(Assembler::EQ, flag_correct);
401 stop("Fast Lock Flag != EQ");
402 #endif
403
404 bind(slow_path);
405 #ifdef ASSERT
406 // Check that slow_path label is reached with Flags == NE.
407 br(Assembler::NE, flag_correct);
408 stop("Fast Lock Flag != NE");
409 bind(flag_correct);
410 #endif
411 // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
412 }
413
// C2 fast-path monitor exit for LM_LIGHTWEIGHT locking mode.
// Contract with C2-generated code: 'unlocked' => EQ, 'slow_path' => NE
// (checked under ASSERT at the bottom).
414 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register t1,
415 Register t2, Register t3) {
// NOTE(review): original file lines 416-543 are not visible in this chunk.
// They contain the lightweight fast-unlock attempt and the start of the
// inflated-monitor release, and declare t1_monitor/t3_t and the
// unlocked/slow_path labels used below — confirm against the full source.
544 ldr(rscratch1, Address(t1_monitor, ObjectMonitor::EntryList_offset()));
545 ldr(t3_t, Address(t1_monitor, ObjectMonitor::cxq_offset()));
546 orr(rscratch1, rscratch1, t3_t);
// No waiters on EntryList or cxq => nobody to wake, fast path is done.
547 cmp(rscratch1, zr);
548 br(Assembler::EQ, unlocked); // If so we are done.
549
550 // Check if there is a successor.
551 ldr(rscratch1, Address(t1_monitor, ObjectMonitor::succ_offset()));
552 cmp(rscratch1, zr);
553 br(Assembler::NE, unlocked); // If so we are done.
554
555 // Save the monitor pointer in the current thread, so we can try to
556 // reacquire the lock in SharedRuntime::monitor_exit_helper().
557 str(t1_monitor, Address(rthread, JavaThread::unlocked_inflated_monitor_offset()));
558
559 cmp(zr, rthread); // Set Flag to NE => slow path
560 b(slow_path);
561 }
562
// Note: unlike the legacy path, no held-monitor count update here.
563 bind(unlocked);
564 cmp(zr, zr); // Set Flags to EQ => fast path
565
566 #ifdef ASSERT
567 // Check that unlocked label is reached with Flags == EQ.
568 Label flag_correct;
569 br(Assembler::EQ, flag_correct);
570 stop("Fast Unlock Flag != EQ");
571 #endif
572
573 bind(slow_path);
574 #ifdef ASSERT
575 // Check that slow_path label is reached with Flags == NE.
576 br(Assembler::NE, flag_correct);
577 stop("Fast Unlock Flag != NE");
578 bind(flag_correct);
579 #endif
580 // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
581 }
582
583 // Search for str1 in str2 and return index or -1
|