35 #include "utilities/globalDefinitions.hpp"
36
37 #ifdef PRODUCT
38 #define BLOCK_COMMENT(str) /* nothing */
39 #define STOP(error) stop(error)
40 #else
41 #define BLOCK_COMMENT(str) block_comment(str)
42 #define STOP(error) block_comment(error); stop(error)
43 #endif
44
45 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
46
47 typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr);
48
// Emit the C2 fast path for monitorenter (LM_MONITOR / LM_LEGACY only;
// LM_LIGHTWEIGHT uses fast_lock_lightweight, see the assert below).
// Contract on exit: flags == EQ  -> lock acquired on the fast path;
//                   flags == NE  -> caller must call into the runtime.
// Clobbers tmpReg (disp_hdr), tmp2Reg (tmp), tmp3Reg, rscratch1 and rflags.
49 void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register tmpReg,
50 Register tmp2Reg, Register tmp3Reg) {
51 Register oop = objectReg;
52 Register box = boxReg;
53 Register disp_hdr = tmpReg;
54 Register tmp = tmp2Reg;
55 Label cont;
56 Label object_has_monitor;
57 Label count, no_count;
58
59 assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
60 assert_different_registers(oop, box, tmp, disp_hdr);
61
62 // Load markWord from object into displaced_header.
63 ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
64
// Synchronizing on a value-based class is not allowed: force the slow
// path (flags stay NE from tstw) so the runtime can diagnose it.
65 if (DiagnoseSyncOnValueBasedClasses != 0) {
66 load_klass(tmp, oop);
67 ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
68 tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
69 br(Assembler::NE, cont);
70 }
71
72 // Check for existing monitor
73 tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
74
75 if (LockingMode == LM_MONITOR) {
76 tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
77 b(cont);
78 } else {
79 assert(LockingMode == LM_LEGACY, "must be");
80 // Set tmp to be (markWord of object | UNLOCK_VALUE).
81 orr(tmp, disp_hdr, markWord::unlocked_value);
82
83 // Initialize the box. (Must happen before we update the object mark!)
84 str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
85
86 // Compare object markWord with an unlocked value (tmp) and if
87 // equal exchange the stack address of our box with object markWord.
88 // On failure disp_hdr contains the possibly locked markWord.
89 cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
90 /*release*/ true, /*weak*/ false, disp_hdr);
91 br(Assembler::EQ, cont);
92
93 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
94
95 // If the compare-and-exchange succeeded, then we found an unlocked
96 // object, have now locked it, and will continue at label cont.
97
98 // Check if the owner is self by comparing the value in the
99 // markWord of object (disp_hdr) with the stack pointer.
100 mov(rscratch1, sp);
101 sub(disp_hdr, disp_hdr, rscratch1);
// Mask of all bits outside the page offset, plus the mark's lock bits.
// (mark - sp) AND mask == 0 iff the mark is a stack address within our
// own page, i.e. this thread already stack-locks the object.
102 mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
103 // If the masked difference is zero (EQ) this is a recursive stack-lock,
104 // so store 0 as the displaced header in the box to mark it as such.
105 ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
106 str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
107 b(cont);
108 }
109
110 // Handle existing monitor.
111 bind(object_has_monitor);
112
113 // The object's monitor m is unlocked iff m->owner == nullptr,
114 // otherwise m->owner may contain a thread or a stack address.
115 //
116 // Try to CAS m->owner from null to current thread.
// tmp = &m->owner; disp_hdr still holds the tagged monitor pointer, so
// subtract the monitor_value tag when forming the field address.
117 add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
118 cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
119 /*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result
120
121 // Store a non-null value into the box to avoid looking like a re-entrant
122 // lock. The fast-path monitor unlock code checks for
123 // markWord::monitor_value so use markWord::unused_mark which has the
124 // relevant bit set, and also matches ObjectSynchronizer::enter.
125 mov(tmp, (address)markWord::unused_mark().value());
126 str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
127
128 br(Assembler::EQ, cont); // CAS success means locking succeeded
129
// CAS failed: tmp3Reg holds the observed owner. If it is not this
// thread, take the slow path (NE); otherwise this is a recursive enter.
130 cmp(tmp3Reg, rthread);
131 br(Assembler::NE, cont); // Check for recursive locking
132
133 // Recursive lock case
134 increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
135 // flag == EQ still from the cmp above, checking if this is a reentrant lock
136
137 bind(cont);
138 // flag == EQ indicates success
139 // flag == NE indicates failure
140 br(Assembler::NE, no_count);
141
// Successful lock: bump the per-thread held-monitor counter.
142 bind(count);
143 increment(Address(rthread, JavaThread::held_monitor_count_offset()));
144
145 bind(no_count);
146 }
147
// Emit the C2 fast path for monitorexit (LM_MONITOR / LM_LEGACY only).
// Contract on exit: flags == EQ -> unlocked on the fast path;
//                   flags == NE -> caller must call into the runtime.
// Clobbers tmpReg (disp_hdr), tmp2Reg (tmp), rscratch1 and rflags.
148 void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Register tmpReg,
149 Register tmp2Reg) {
150 Register oop = objectReg;
151 Register box = boxReg;
152 Register disp_hdr = tmpReg;
153 Register tmp = tmp2Reg;
154 Label cont;
155 Label object_has_monitor;
156 Label count, no_count;
157
158 assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
159 assert_different_registers(oop, box, tmp, disp_hdr);
160
161 if (LockingMode == LM_LEGACY) {
162 // Find the lock address and load the displaced header from the stack.
163 ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
164
165 // If the displaced header is 0, we have a recursive unlock.
// (EQ from this cmp also signals fast-path success at cont.)
166 cmp(disp_hdr, zr);
167 br(Assembler::EQ, cont);
168 }
169
170 // Handle existing monitor.
171 ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
172 tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
173
174 if (LockingMode == LM_MONITOR) {
175 tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
176 b(cont);
177 } else {
178 assert(LockingMode == LM_LEGACY, "must be");
179 // Check if it is still a light weight lock, this is true if we
180 // see the stack address of the basicLock in the markWord of the
181 // object.
182
// CAS mark: box-address -> displaced header. EQ on success; the
// cmpxchg flags flow directly to the branch at cont.
183 cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
184 /*release*/ true, /*weak*/ false, tmp);
185 b(cont);
186 }
187
188 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
189
190 // Handle existing monitor.
191 bind(object_has_monitor);
192 STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
// Strip the monitor tag: tmp now points at the ObjectMonitor itself.
193 add(tmp, tmp, -(int)markWord::monitor_value); // monitor
194
195 ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
196
197 Label notRecursive;
198 cbz(disp_hdr, notRecursive);
199
200 // Recursive lock
201 sub(disp_hdr, disp_hdr, 1u);
202 str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
// Self-compare always yields EQ: report fast-path success.
203 cmp(disp_hdr, disp_hdr); // Sets flags for result
204 b(cont);
205
206 bind(notRecursive);
// If EntryList or cxq is non-empty there are waiters: hand off to the
// runtime (cbnz leaves NE from the cmp above).
207 ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
208 ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
209 orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
210 cmp(rscratch1, zr); // Sets flags for result
211 cbnz(rscratch1, cont);
212 // need a release store here
213 lea(tmp, Address(tmp, ObjectMonitor::owner_offset()));
214 stlr(zr, tmp); // set unowned
215
216 bind(cont);
217 // flag == EQ indicates success
218 // flag == NE indicates failure
219 br(Assembler::NE, no_count);
220
// Successful unlock: decrement the per-thread held-monitor counter.
221 bind(count);
222 decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
223
224 bind(no_count);
225 }
226
// Emit the C2 fast path for monitorenter under LM_LIGHTWEIGHT (lock-stack
// based locking). Contract on exit: EQ = locked, NE = take slow path.
// NOTE(review): this listing appears to be missing lines in the middle of
// this function (see note below) — verify against the upstream file.
227 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1,
228 Register t2, Register t3) {
229 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
230 assert_different_registers(obj, t1, t2, t3);
231
232 // Handle inflated monitor.
233 Label inflated;
234 // Finish fast lock successfully. MUST branch to with flag == EQ
235 Label locked;
236 // Finish fast lock unsuccessfully. MUST branch to with flag == NE
237 Label slow_path;
238
239 if (DiagnoseSyncOnValueBasedClasses != 0) {
240 load_klass(t1, obj);
241 ldrw(t1, Address(t1, Klass::access_flags_offset()));
242 tstw(t1, JVM_ACC_IS_VALUE_BASED_CLASS);
// NOTE(review): the source jumps here from line 242 to line 281 — the
// value-based-class branch, the lock-stack fast path, and the
// declarations of t1_mark, t2_top and the 'push' label all appear to be
// missing from this view. Restore the missing span from upstream before
// building; do not edit around this gap.
281 bind(push);
282 // After successful lock, push object on lock-stack.
283 str(obj, Address(rthread, t2_top));
284 addw(t2_top, t2_top, oopSize);
285 strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
286 b(locked);
287 }
288
289 { // Handle inflated monitor.
290 bind(inflated);
291
292 // mark contains the tagged ObjectMonitor*.
293 const Register t1_tagged_monitor = t1_mark;
294 const uintptr_t monitor_tag = markWord::monitor_value;
295 const Register t2_owner_addr = t2;
296 const Register t3_owner = t3;
297
298 // Compute owner address.
299 lea(t2_owner_addr, Address(t1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag)));
300
301 // CAS owner (null => current thread).
302 cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true,
303 /*release*/ false, /*weak*/ false, t3_owner);
304 br(Assembler::EQ, locked);
305
306 // Check if recursive.
307 cmp(t3_owner, rthread);
308 br(Assembler::NE, slow_path);
309
310 // Recursive.
311 increment(Address(t1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1);
312 }
313
314 bind(locked);
315 increment(Address(rthread, JavaThread::held_monitor_count_offset()));
316
317 #ifdef ASSERT
318 // Check that locked label is reached with Flags == EQ.
319 Label flag_correct;
320 br(Assembler::EQ, flag_correct);
321 stop("Fast Lock Flag != EQ");
322 #endif
323
324 bind(slow_path);
325 #ifdef ASSERT
326 // Check that slow_path label is reached with Flags == NE.
327 br(Assembler::NE, flag_correct);
328 stop("Fast Lock Flag != NE");
329 bind(flag_correct);
330 #endif
331 // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
332 }
333
// Emit the C2 fast path for monitorexit under LM_LIGHTWEIGHT.
// Contract on exit: EQ = unlocked on the fast path, NE = slow path.
// NOTE(review): the start of this function's body is missing from this
// listing (line numbering jumps 335 -> 433): the lock-stack pop, the
// inflated-monitor dispatch, and the declarations of t1_monitor,
// t2_recursions, t3_t and labels unlocked/not_recursive/slow_path are not
// visible here. Restore from upstream before building.
334 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Register t2,
335 Register t3) {
// Self-compare always yields EQ: recursive exit succeeded.
433 cmp(t2_recursions, t2_recursions);
434 b(unlocked);
435
436 bind(not_recursive);
437
438 Label release;
439 const Register t2_owner_addr = t2;
440
441 // Compute owner address.
442 lea(t2_owner_addr, Address(t1_monitor, ObjectMonitor::owner_offset()));
443
444 // Check if the entry lists are empty.
445 ldr(rscratch1, Address(t1_monitor, ObjectMonitor::EntryList_offset()));
446 ldr(t3_t, Address(t1_monitor, ObjectMonitor::cxq_offset()));
447 orr(rscratch1, rscratch1, t3_t);
448 cmp(rscratch1, zr);
449 br(Assembler::EQ, release);
450
451 // The owner may be anonymous and we removed the last obj entry in
452 // the lock-stack. This loses the information about the owner.
453 // Write the thread to the owner field so the runtime knows the owner.
454 str(rthread, Address(t2_owner_addr));
455 b(slow_path);
456
457 bind(release);
458 // Set owner to null.
459 // Release to satisfy the JMM
460 stlr(zr, t2_owner_addr);
461 }
462
463 bind(unlocked);
464 decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
465
466 #ifdef ASSERT
467 // Check that unlocked label is reached with Flags == EQ.
468 Label flag_correct;
469 br(Assembler::EQ, flag_correct);
470 stop("Fast Unlock Flag != EQ");
471 #endif
472
473 bind(slow_path);
474 #ifdef ASSERT
475 // Check that slow_path label is reached with Flags == NE.
476 br(Assembler::NE, flag_correct);
477 stop("Fast Unlock Flag != NE");
478 bind(flag_correct);
479 #endif
480 // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
481 }
482
483 // Search for str1 in str2 and return index or -1
484 // Clobbers: rscratch1, rscratch2, rflags. May also clobber v0-v1, when icnt1==-1.
|
35 #include "utilities/globalDefinitions.hpp"
36
37 #ifdef PRODUCT
38 #define BLOCK_COMMENT(str) /* nothing */
39 #define STOP(error) stop(error)
40 #else
41 #define BLOCK_COMMENT(str) block_comment(str)
42 #define STOP(error) block_comment(error); stop(error)
43 #endif
44
45 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
46
47 typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr);
48
// Emit the C2 fast path for monitorenter (LM_MONITOR / LM_LEGACY only).
// In this version the ObjectMonitor owner field holds a thread lock id
// (loaded from JavaThread::lock_id_offset, see below), not the JavaThread*.
// Contract on exit: flags == EQ -> locked, flags == NE -> slow path.
// Clobbers tmpReg (disp_hdr), tmp2Reg (tmp), tmp3Reg, rscratch1, rscratch2
// and rflags.
49 void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register tmpReg,
50 Register tmp2Reg, Register tmp3Reg) {
51 Register oop = objectReg;
52 Register box = boxReg;
53 Register disp_hdr = tmpReg;
54 Register tmp = tmp2Reg;
55 Label object_has_monitor;
56 Label count, no_count;
57
58 assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
59 assert_different_registers(oop, box, tmp, disp_hdr, rscratch1);
60
61 // Load markWord from object into displaced_header.
62 ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
63
// Synchronizing on a value-based class is not allowed: force the slow
// path (flags stay NE from tstw).
64 if (DiagnoseSyncOnValueBasedClasses != 0) {
65 load_klass(tmp, oop);
66 ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
67 tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
68 br(Assembler::NE, no_count);
69 }
70
71 // Check for existing monitor
72 tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
73
74 if (LockingMode == LM_MONITOR) {
75 tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
76 b(no_count);
77 } else {
78 assert(LockingMode == LM_LEGACY, "must be");
79 // Set tmp to be (markWord of object | UNLOCK_VALUE).
80 orr(tmp, disp_hdr, markWord::unlocked_value);
81
82 // Initialize the box. (Must happen before we update the object mark!)
83 str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
84
85 // Compare object markWord with an unlocked value (tmp) and if
86 // equal exchange the stack address of our box with object markWord.
87 // On failure disp_hdr contains the possibly locked markWord.
88 cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
89 /*release*/ true, /*weak*/ false, disp_hdr);
90 br(Assembler::EQ, count);
91
92 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
93
94 // If the compare-and-exchange succeeded, then we found an unlocked
95 // object, have now locked it, and continue at label count.
96
97 // Check if the owner is self by comparing the value in the
98 // markWord of object (disp_hdr) with the stack pointer.
99 mov(rscratch1, sp);
100 sub(disp_hdr, disp_hdr, rscratch1);
// Mask of all bits outside the page offset, plus the mark's lock bits:
// zero iff the mark is a stack address within our own page.
101 mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
102 // If the masked difference is zero (EQ) this is a recursive stack-lock,
103 // so store 0 as the displaced header in the box to mark it as such.
104 ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
105 str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
// Recursive stack-lock does not bump the counter: go straight to exit.
106 b(no_count);
107 }
108
109 // Handle existing monitor.
110 bind(object_has_monitor);
111
112 // The object's monitor m is unlocked iff m->owner == nullptr,
113 // otherwise m->owner may contain a thread id, a stack address for LM_LEGACY,
114 // or the ANONYMOUS_OWNER constant for LM_LIGHTWEIGHT.
115 //
116 // Try to CAS m->owner from null to current thread.
// rscratch2 = this thread's lock id — the value stored as monitor owner.
117 ldr(rscratch2, Address(rthread, JavaThread::lock_id_offset()));
118 add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
119 cmpxchg(tmp, zr, rscratch2, Assembler::xword, /*acquire*/ true,
120 /*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result
121
122 // Store a non-null value into the box to avoid looking like a re-entrant
123 // lock. The fast-path monitor unlock code checks for
124 // markWord::monitor_value so use markWord::unused_mark which has the
125 // relevant bit set, and also matches ObjectSynchronizer::enter.
126 mov(tmp, (address)markWord::unused_mark().value());
127 str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
128
// Monitor-path success exits with EQ without touching the held count.
129 br(Assembler::EQ, no_count); // CAS success means locking succeeded
130
// CAS failed: tmp3Reg holds the observed owner id. Not us -> slow path.
131 cmp(tmp3Reg, rscratch2);
132 br(Assembler::NE, no_count); // Check for recursive locking
133
134 // Recursive lock case
135 increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
136 // flag == EQ still from the cmp above, checking if this is a reentrant lock
137 b(no_count);
138
// Fresh stack-lock acquired: bump the per-thread held-monitor counter.
139 bind(count);
140 inc_held_monitor_count();
141
142 bind(no_count);
143 // flag == EQ indicates success
144 // flag == NE indicates failure
145 }
146
// Emit the C2 fast path for monitorexit (LM_MONITOR / LM_LEGACY only).
// This version fixes an ANONYMOUS_OWNER monitor in an out-of-line stub
// before inspecting recursions. Contract on exit: EQ = unlocked on the
// fast path, NE = slow path.
// Clobbers tmpReg (disp_hdr), tmp2Reg (tmp), rscratch1 and rflags.
147 void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Register tmpReg,
148 Register tmp2Reg) {
149 Register oop = objectReg;
150 Register box = boxReg;
151 Register disp_hdr = tmpReg;
152 Register tmp = tmp2Reg;
153 Label cont;
154 Label object_has_monitor;
155 Label no_count;
156
157 assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
158 assert_different_registers(oop, box, tmp, disp_hdr);
159
160 if (LockingMode == LM_LEGACY) {
161 // Find the lock address and load the displaced header from the stack.
162 ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
163
164 // If the displaced header is 0, we have a recursive unlock.
// (EQ from this cmp also signals fast-path success at no_count.)
165 cmp(disp_hdr, zr);
166 br(Assembler::EQ, no_count);
167 }
168
169 // Handle existing monitor.
170 ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
171 tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
172
173 if (LockingMode == LM_MONITOR) {
174 tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
175 b(no_count);
176 } else {
177 assert(LockingMode == LM_LEGACY, "must be");
178 // Check if it is still a light weight lock, this is true if we
179 // see the stack address of the basicLock in the markWord of the
180 // object.
181
// CAS mark: box-address -> displaced header. EQ on success; flags flow
// through cont to the success/failure dispatch below.
182 cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
183 /*release*/ true, /*weak*/ false, tmp);
184 b(cont);
185 }
186
187 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
188
189 // Handle existing monitor.
190 bind(object_has_monitor);
191 STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
// Strip the monitor tag: tmp now points at the ObjectMonitor itself.
192 add(tmp, tmp, -(int)markWord::monitor_value); // monitor
193
194 // If the owner is anonymous, we need to fix it -- in an outline stub.
195 Register tmp2 = disp_hdr;
196 ldr(tmp2, Address(tmp, ObjectMonitor::owner_offset()));
197 // We cannot use tbnz here, the target might be too far away and cannot
198 // be encoded.
199 mov(rscratch1, (uint64_t)ObjectMonitor::ANONYMOUS_OWNER);
200 cmp(tmp2, rscratch1);
// Out-of-line stub claims the anonymous monitor for this thread, then
// falls back in at continuation().
201 C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2);
202 Compile::current()->output()->add_stub(stub);
203 br(Assembler::EQ, stub->entry());
204 bind(stub->continuation());
205
206 ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
207
208 Label notRecursive;
209 cbz(disp_hdr, notRecursive);
210
211 // Recursive lock
212 sub(disp_hdr, disp_hdr, 1u);
213 str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
// Self-compare always yields EQ: report fast-path success.
214 cmp(disp_hdr, disp_hdr); // Sets flags for result
215 b(no_count);
216
217 bind(notRecursive);
// Non-empty EntryList/cxq means there are waiters: hand off to the
// runtime (cbnz leaves NE from the cmp above).
218 ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
219 ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
220 orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
221 cmp(rscratch1, zr); // Sets flags for result
222 cbnz(rscratch1, no_count);
223 // need a release store here
224 lea(tmp, Address(tmp, ObjectMonitor::owner_offset()));
225 stlr(zr, tmp); // set unowned
// Monitor-path exit does not touch the held count.
226 b(no_count);
227
228 bind(cont);
229 // flag == EQ indicates success
230 // flag == NE indicates failure
231 br(Assembler::NE, no_count);
232
// Legacy stack-lock released: decrement the held-monitor counter.
233 dec_held_monitor_count();
234
235 bind(no_count);
236 }
237
// Emit the C2 fast path for monitorenter under LM_LIGHTWEIGHT (lock-stack
// based locking; monitor owner is the thread's lock id, see below).
// Contract on exit: EQ = locked, NE = take slow path.
// NOTE(review): this listing appears to be missing lines in the middle of
// this function (see note below) — verify against the upstream file.
238 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1,
239 Register t2, Register t3) {
240 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
241 assert_different_registers(obj, t1, t2, t3);
242
243 // Handle inflated monitor.
244 Label inflated;
245 // Finish fast lock successfully. MUST branch to with flag == EQ
246 Label locked;
247 // Finish fast lock unsuccessfully. MUST branch to with flag == NE
248 Label slow_path;
249
250 if (DiagnoseSyncOnValueBasedClasses != 0) {
251 load_klass(t1, obj);
252 ldrw(t1, Address(t1, Klass::access_flags_offset()));
253 tstw(t1, JVM_ACC_IS_VALUE_BASED_CLASS);
// NOTE(review): the source jumps here from line 253 to line 292 — the
// value-based-class branch, the lock-stack fast path, and the
// declarations of t1_mark, t2_top and the 'push' label all appear to be
// missing from this view. Restore the missing span from upstream before
// building; do not edit around this gap.
292 bind(push);
293 // After successful lock, push object on lock-stack.
294 str(obj, Address(rthread, t2_top));
295 addw(t2_top, t2_top, oopSize);
296 strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
297 b(locked);
298 }
299
300 { // Handle inflated monitor.
301 bind(inflated);
302
303 // mark contains the tagged ObjectMonitor*.
304 const Register t1_tagged_monitor = t1_mark;
305 const uintptr_t monitor_tag = markWord::monitor_value;
306 const Register t2_owner_addr = t2;
307 const Register t3_owner = t3;
308
309 // Compute owner address.
310 lea(t2_owner_addr, Address(t1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag)));
311
312 // CAS owner (null => current thread id).
313 ldr(rscratch2, Address(rthread, JavaThread::lock_id_offset()));
314 cmpxchg(t2_owner_addr, zr, rscratch2, Assembler::xword, /*acquire*/ true,
315 /*release*/ false, /*weak*/ false, t3_owner);
316 br(Assembler::EQ, locked);
317
318 // Check if recursive.
319 cmp(t3_owner, rscratch2);
320 br(Assembler::NE, slow_path);
321
322 // Recursive.
323 increment(Address(t1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1);
324 }
325
// Note: no held_monitor_count update here (unlike the legacy fast_lock).
326 bind(locked);
327
328 #ifdef ASSERT
329 // Check that locked label is reached with Flags == EQ.
330 Label flag_correct;
331 br(Assembler::EQ, flag_correct);
332 stop("Fast Lock Flag != EQ");
333 #endif
334
335 bind(slow_path);
336 #ifdef ASSERT
337 // Check that slow_path label is reached with Flags == NE.
338 br(Assembler::NE, flag_correct);
339 stop("Fast Lock Flag != NE");
340 bind(flag_correct);
341 #endif
342 // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
343 }
344
// Emit the C2 fast path for monitorexit under LM_LIGHTWEIGHT.
// Contract on exit: EQ = unlocked on the fast path, NE = slow path.
// NOTE(review): the start of this function's body is missing from this
// listing (line numbering jumps 346 -> 444): the lock-stack pop, the
// inflated-monitor dispatch, and the declarations of t1_monitor,
// t2_recursions, t3_t and labels unlocked/not_recursive/slow_path are not
// visible here. Restore from upstream before building.
345 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Register t2,
346 Register t3) {
// Self-compare always yields EQ: recursive exit succeeded.
444 cmp(t2_recursions, t2_recursions);
445 b(unlocked);
446
447 bind(not_recursive);
448
449 Label release;
450 const Register t2_owner_addr = t2;
451
452 // Compute owner address.
453 lea(t2_owner_addr, Address(t1_monitor, ObjectMonitor::owner_offset()));
454
455 // Check if the entry lists are empty.
456 ldr(rscratch1, Address(t1_monitor, ObjectMonitor::EntryList_offset()));
457 ldr(t3_t, Address(t1_monitor, ObjectMonitor::cxq_offset()));
458 orr(rscratch1, rscratch1, t3_t);
459 cmp(rscratch1, zr);
460 br(Assembler::EQ, release);
461
462 // The owner may be anonymous and we removed the last obj entry in
463 // the lock-stack. This loses the information about the owner.
464 // Write the thread id to the owner field so the runtime knows the owner.
465 ldr(t3_t, Address(rthread, JavaThread::lock_id_offset()));
466 str(t3_t, Address(t2_owner_addr));
467 b(slow_path);
468
469 bind(release);
470 // Set owner to null.
471 // Release to satisfy the JMM
472 stlr(zr, t2_owner_addr);
473 }
474
// Note: no held_monitor_count update here (unlike the legacy fast_unlock).
475 bind(unlocked);
476
477 #ifdef ASSERT
478 // Check that unlocked label is reached with Flags == EQ.
479 Label flag_correct;
480 br(Assembler::EQ, flag_correct);
481 stop("Fast Unlock Flag != EQ");
482 #endif
483
484 bind(slow_path);
485 #ifdef ASSERT
486 // Check that slow_path label is reached with Flags == NE.
487 br(Assembler::NE, flag_correct);
488 stop("Fast Unlock Flag != NE");
489 bind(flag_correct);
490 #endif
491 // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
492 }
493
494 // Search for str1 in str2 and return index or -1
495 // Clobbers: rscratch1, rscratch2, rflags. May also clobber v0-v1, when icnt1==-1.
|