52 int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
53 const int aligned_mask = BytesPerWord - 1;
54 const int hdr_offset = oopDesc::mark_offset_in_bytes();
55 assert_different_registers(hdr, obj, disp_hdr, temp, t0, t1);
56 int null_check_offset = -1;
57
58 verify_oop(obj);
59
60 // save object being locked into the BasicObjectLock
61 sd(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
62
63 null_check_offset = offset();
64
65 if (DiagnoseSyncOnValueBasedClasses != 0) {
66 load_klass(hdr, obj);
67 lwu(hdr, Address(hdr, Klass::access_flags_offset()));
68 test_bit(temp, hdr, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
69 bnez(temp, slow_case, true /* is_far */);
70 }
71
72 // Load object header
73 ld(hdr, Address(obj, hdr_offset));
74
75 if (LockingMode == LM_LIGHTWEIGHT) {
76 lightweight_lock(obj, hdr, temp, t1, slow_case);
77 } else if (LockingMode == LM_LEGACY) {
78 Label done;
79 // and mark it as unlocked
80 ori(hdr, hdr, markWord::unlocked_value);
81 // save unlocked object header into the displaced header location on the stack
82 sd(hdr, Address(disp_hdr, 0));
83 // test if object header is still the same (i.e. unlocked), and if so, store the
84 // displaced header address in the object header - if it is not the same, get the
85 // object header instead
86 la(temp, Address(obj, hdr_offset));
87 cmpxchgptr(hdr, disp_hdr, temp, t1, done, /*fallthough*/nullptr);
88 // if the object header was the same, we're done
89 // if the object header was not the same, it is now in the hdr register
90 // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
91 //
92 // 1) (hdr & aligned_mask) == 0
93 // 2) sp <= hdr
94 // 3) hdr <= sp + page_size
95 //
96 // these 3 tests can be done by evaluating the following expression:
97 //
98 // (hdr -sp) & (aligned_mask - page_size)
117
// C1 monitorexit fast path: release the lock recorded in the BasicObjectLock
// whose address is in disp_hdr. Falls through on success; branches to
// slow_case when the inline unlock cannot complete and a runtime call is
// required. Reloads obj from the lock slot and clobbers hdr, obj and temp.
118 void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
119 const int aligned_mask = BytesPerWord - 1; // NOTE(review): unused in the visible paths — confirm before removing
120 const int hdr_offset = oopDesc::mark_offset_in_bytes();
121 assert_different_registers(hdr, obj, disp_hdr, temp, t0, t1);
122 Label done;
123
124 if (LockingMode != LM_LIGHTWEIGHT) {
125 // load displaced header
126 ld(hdr, Address(disp_hdr, 0));
127 // if the loaded hdr is null we had recursive locking
128 // if we had recursive locking, we are done
129 beqz(hdr, done);
130 }
131
132 // load object
133 ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
134 verify_oop(obj);
135
136 if (LockingMode == LM_LIGHTWEIGHT) {
// If the monitor bit is set in the mark word the lock was inflated;
// inline unlock cannot handle that, so go to the runtime.
137 ld(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
138 test_bit(temp, hdr, exact_log2(markWord::monitor_value));
139 bnez(temp, slow_case, /* is_far */ true);
140 lightweight_unlock(obj, hdr, temp, t1, slow_case);
141 } else if (LockingMode == LM_LEGACY) {
142 // test if object header is pointing to the displaced header, and if so, restore
143 // the displaced header in the object - if the object header is not pointing to
144 // the displaced header, get the object header instead
145 // if the object header was not pointing to the displaced header,
146 // we do unlocking via runtime call
147 if (hdr_offset) {
148 la(temp, Address(obj, hdr_offset));
149 cmpxchgptr(disp_hdr, hdr, temp, t1, done, &slow_case);
150 } else {
151 cmpxchgptr(disp_hdr, hdr, obj, t1, done, &slow_case);
152 }
153 // done
154 bind(done);
155 }
156
// Fast-path unlock succeeded (or was recursive): update the per-thread
// held-monitor bookkeeping.
157 decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
158 }
159
164 } else {
165 j(slow_case);
166 }
167 }
168
// Initialize the header of a freshly allocated object: mark word, klass
// (compressed or full-width), and — for arrays — the length field.
// len is noreg for plain objects. Clobbers tmp1 and tmp2; preserves klass.
169 void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp1, Register tmp2) {
170 assert_different_registers(obj, klass, len, tmp1, tmp2);
171 // This assumes that all prototype bits fit in an int32_t
172 mv(tmp1, (int32_t)(intptr_t)markWord::prototype().value());
173 sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes()));
174
175 if (UseCompressedClassPointers) { // Take care not to kill klass
176 encode_klass_not_null(tmp1, klass, tmp2);
177 sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes()));
178 } else {
179 sd(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
180 }
181
182 if (len->is_valid()) {
// Array: store the element count.
183 sw(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
184 } else if (UseCompressedClassPointers) {
// Plain object with compressed class pointers: zero the 4-byte gap
// left after the narrow klass field.
185 store_klass_gap(obj, zr);
186 }
187 }
188
189 // preserves obj, destroys len_in_bytes
190 void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register tmp) {
191 assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
192 Label done;
193
194 // len_in_bytes is positive and ptr sized
195 sub(len_in_bytes, len_in_bytes, hdr_size_in_bytes);
196 beqz(len_in_bytes, done);
197
198 // Preserve obj
199 if (hdr_size_in_bytes) {
200 add(obj, obj, hdr_size_in_bytes);
201 }
202 zero_memory(obj, len_in_bytes, tmp);
203 if (hdr_size_in_bytes) {
263 sd(zr, Address(t0, i * wordSize));
264 }
265 if (remainder == 0) {
266 bind(entry_point);
267 }
268 add(t0, t0, unroll * wordSize);
269 bnez(index, loop);
270 }
271 }
272
273 membar(MacroAssembler::StoreStore);
274
275 if (CURRENT_ENV->dtrace_alloc_probes()) {
276 assert(obj == x10, "must be");
277 far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
278 }
279
280 verify_oop(obj);
281 }
282
// Allocate and initialize an array object.
//   obj         - result register, receives the new array oop
//   len         - element count (also reused as a scratch register for body zeroing)
//   header_size - array header size in words
//   f           - log2 of the element size; used as the shift in shadd when
//                 computing len << f (presumably; confirm against shadd semantics)
//   klass       - array klass
// Branches to slow_case (far) on negative/excessive length or allocation failure.
283 void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int header_size, int f, Register klass, Label& slow_case) {
284 assert_different_registers(obj, len, tmp1, tmp2, klass);
285
286 // determine alignment mask
287 assert(!(BytesPerWord & 1), "must be multiple of 2 for masking code to work");
288
289 // check for negative or excessive length
// Unsigned compare also catches negative lengths (they wrap to huge values).
290 mv(t0, (int32_t)max_array_allocation_length);
291 bgeu(len, t0, slow_case, /* is_far */ true);
292
293 const Register arr_size = tmp2; // okay to be the same
294 // align object end
// arr_size = align_up(header_size_in_bytes + len * element_size, MinObjAlignmentInBytes)
295 mv(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
296 shadd(arr_size, len, arr_size, t0, f);
297 andi(arr_size, arr_size, ~(uint)MinObjAlignmentInBytesMask);
298
299 try_allocate(obj, arr_size, 0, tmp1, tmp2, slow_case);
300
301 initialize_header(obj, klass, len, tmp1, tmp2);
302
303 // clear rest of allocated space
// len is dead after initialize_header, so reuse it as scratch.
304 const Register len_zero = len;
305 initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);
306
// Publish the initialized object before it becomes visible to other threads.
307 membar(MacroAssembler::StoreStore);
308
309 if (CURRENT_ENV->dtrace_alloc_probes()) {
310 assert(obj == x10, "must be");
311 far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
312 }
313
314 verify_oop(obj);
315 }
316
// Verify that the receiver's klass matches the inline cache; jumps to L
// on a miss. The klass load itself performs the implicit null check on
// the receiver (a null receiver traps on the [klass_offset] access).
317 void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
318 verify_oop(receiver);
319 // explicit null check not needed since load from [klass_offset] causes a trap
320 // check against inline cache
321 assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
322 assert_different_registers(receiver, iCache, t0, t2);
323 cmp_klass(receiver, iCache, t0, t2 /* call-clobbered t2 as a tmp */, L);
324 }
325
|
52 int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
53 const int aligned_mask = BytesPerWord - 1;
54 const int hdr_offset = oopDesc::mark_offset_in_bytes();
55 assert_different_registers(hdr, obj, disp_hdr, temp, t0, t1);
56 int null_check_offset = -1;
57
58 verify_oop(obj);
59
60 // save object being locked into the BasicObjectLock
61 sd(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
62
63 null_check_offset = offset();
64
65 if (DiagnoseSyncOnValueBasedClasses != 0) {
66 load_klass(hdr, obj);
67 lwu(hdr, Address(hdr, Klass::access_flags_offset()));
68 test_bit(temp, hdr, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
69 bnez(temp, slow_case, true /* is_far */);
70 }
71
72 if (LockingMode == LM_LIGHTWEIGHT) {
73 lightweight_lock(obj, hdr, temp, t1, slow_case);
74 } else if (LockingMode == LM_LEGACY) {
75 Label done;
76 // Load object header
77 ld(hdr, Address(obj, hdr_offset));
78 // and mark it as unlocked
79 ori(hdr, hdr, markWord::unlocked_value);
80 // save unlocked object header into the displaced header location on the stack
81 sd(hdr, Address(disp_hdr, 0));
82 // test if object header is still the same (i.e. unlocked), and if so, store the
83 // displaced header address in the object header - if it is not the same, get the
84 // object header instead
85 la(temp, Address(obj, hdr_offset));
86 cmpxchgptr(hdr, disp_hdr, temp, t1, done, /*fallthough*/nullptr);
87 // if the object header was the same, we're done
88 // if the object header was not the same, it is now in the hdr register
89 // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
90 //
91 // 1) (hdr & aligned_mask) == 0
92 // 2) sp <= hdr
93 // 3) hdr <= sp + page_size
94 //
95 // these 3 tests can be done by evaluating the following expression:
96 //
97 // (hdr -sp) & (aligned_mask - page_size)
116
// C1 monitorexit fast path: release the lock recorded in the BasicObjectLock
// whose address is in disp_hdr. Falls through on success; branches to
// slow_case when the inline unlock cannot complete and a runtime call is
// required. Reloads obj from the lock slot and clobbers hdr, obj and temp.
117 void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
118 const int aligned_mask = BytesPerWord - 1; // NOTE(review): unused in the visible paths — confirm before removing
119 const int hdr_offset = oopDesc::mark_offset_in_bytes();
120 assert_different_registers(hdr, obj, disp_hdr, temp, t0, t1);
121 Label done;
122
123 if (LockingMode != LM_LIGHTWEIGHT) {
124 // load displaced header
125 ld(hdr, Address(disp_hdr, 0));
126 // if the loaded hdr is null we had recursive locking
127 // if we had recursive locking, we are done
128 beqz(hdr, done);
129 }
130
131 // load object
132 ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
133 verify_oop(obj);
134
135 if (LockingMode == LM_LIGHTWEIGHT) {
// The inflated-monitor case is presumably detected inside
// lightweight_unlock itself (it takes slow_case) — confirm.
136 lightweight_unlock(obj, hdr, temp, t1, slow_case);
137 } else if (LockingMode == LM_LEGACY) {
138 // test if object header is pointing to the displaced header, and if so, restore
139 // the displaced header in the object - if the object header is not pointing to
140 // the displaced header, get the object header instead
141 // if the object header was not pointing to the displaced header,
142 // we do unlocking via runtime call
143 if (hdr_offset) {
144 la(temp, Address(obj, hdr_offset));
145 cmpxchgptr(disp_hdr, hdr, temp, t1, done, &slow_case);
146 } else {
147 cmpxchgptr(disp_hdr, hdr, obj, t1, done, &slow_case);
148 }
149 // done
150 bind(done);
151 }
152
// Fast-path unlock succeeded (or was recursive): update the per-thread
// held-monitor bookkeeping.
153 decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
154 }
155
160 } else {
161 j(slow_case);
162 }
163 }
164
// Initialize the header of a freshly allocated object: mark word, klass
// (compressed or full-width), and — for arrays — the length field plus the
// possible 4-byte alignment gap after it. len is noreg for plain objects.
// Clobbers tmp1 and tmp2; preserves klass.
165 void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp1, Register tmp2) {
166 assert_different_registers(obj, klass, len, tmp1, tmp2);
167 // This assumes that all prototype bits fit in an int32_t
168 mv(tmp1, (int32_t)(intptr_t)markWord::prototype().value());
169 sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes()));
170
171 if (UseCompressedClassPointers) { // Take care not to kill klass
172 encode_klass_not_null(tmp1, klass, tmp2);
173 sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes()));
174 } else {
175 sd(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
176 }
177
178 if (len->is_valid()) {
// Array: store the element count.
179 sw(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
// When the array header is not word-aligned, zero the 4-byte gap after
// the length field so callers can clear the body from a word-aligned
// offset (see allocate_array, which aligns the header size up).
180 if (!is_aligned(arrayOopDesc::header_size_in_bytes(), BytesPerWord)) {
181 assert(is_aligned(arrayOopDesc::header_size_in_bytes(), BytesPerInt), "must be 4-byte aligned");
182 sw(zr, Address(obj, arrayOopDesc::header_size_in_bytes()));
183 }
184 } else if (UseCompressedClassPointers) {
// Plain object with compressed class pointers: zero the 4-byte gap
// left after the narrow klass field.
185 store_klass_gap(obj, zr);
186 }
187 }
188
189 // preserves obj, destroys len_in_bytes
190 void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register tmp) {
191 assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
192 Label done;
193
194 // len_in_bytes is positive and ptr sized
195 sub(len_in_bytes, len_in_bytes, hdr_size_in_bytes);
196 beqz(len_in_bytes, done);
197
198 // Preserve obj
199 if (hdr_size_in_bytes) {
200 add(obj, obj, hdr_size_in_bytes);
201 }
202 zero_memory(obj, len_in_bytes, tmp);
203 if (hdr_size_in_bytes) {
263 sd(zr, Address(t0, i * wordSize));
264 }
265 if (remainder == 0) {
266 bind(entry_point);
267 }
268 add(t0, t0, unroll * wordSize);
269 bnez(index, loop);
270 }
271 }
272
273 membar(MacroAssembler::StoreStore);
274
275 if (CURRENT_ENV->dtrace_alloc_probes()) {
276 assert(obj == x10, "must be");
277 far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
278 }
279
280 verify_oop(obj);
281 }
282
// Allocate and initialize an array object.
//   obj                  - result register, receives the new array oop
//   len                  - element count (also reused as scratch for body zeroing)
//   base_offset_in_bytes - offset of element 0 from the start of the object
//   f                    - log2 of the element size; used as the shift in shadd
//                          when computing len << f (presumably; confirm against
//                          shadd semantics)
//   klass                - array klass
// Branches to slow_case (far) on negative/excessive length or allocation failure.
283 void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int base_offset_in_bytes, int f, Register klass, Label& slow_case) {
284 assert_different_registers(obj, len, tmp1, tmp2, klass);
285
286 // determine alignment mask
287 assert(!(BytesPerWord & 1), "must be multiple of 2 for masking code to work");
288
289 // check for negative or excessive length
// Unsigned compare also catches negative lengths (they wrap to huge values).
290 mv(t0, (int32_t)max_array_allocation_length);
291 bgeu(len, t0, slow_case, /* is_far */ true);
292
293 const Register arr_size = tmp2; // okay to be the same
294 // align object end
// arr_size = align_up(base_offset_in_bytes + len * element_size, MinObjAlignmentInBytes)
295 mv(arr_size, (int32_t)base_offset_in_bytes + MinObjAlignmentInBytesMask);
296 shadd(arr_size, len, arr_size, t0, f);
297 andi(arr_size, arr_size, ~(uint)MinObjAlignmentInBytesMask);
298
299 try_allocate(obj, arr_size, 0, tmp1, tmp2, slow_case);
300
301 initialize_header(obj, klass, len, tmp1, tmp2);
302
303 // clear rest of allocated space
// len is dead after initialize_header, so reuse it as scratch.
304 const Register len_zero = len;
305 // We align-up the header size to word-size, because we clear the
306 // possible alignment gap in initialize_header().
307 int hdr_size = align_up(base_offset_in_bytes, BytesPerWord);
308 initialize_body(obj, arr_size, hdr_size, len_zero);
309
// Publish the initialized object before it becomes visible to other threads.
310 membar(MacroAssembler::StoreStore);
311
312 if (CURRENT_ENV->dtrace_alloc_probes()) {
313 assert(obj == x10, "must be");
314 far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
315 }
316
317 verify_oop(obj);
318 }
319
// Verify that the receiver's klass matches the inline cache; jumps to L
// on a miss. The klass load itself performs the implicit null check on
// the receiver (a null receiver traps on the [klass_offset] access).
320 void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
321 verify_oop(receiver);
322 // explicit null check not needed since load from [klass_offset] causes a trap
323 // check against inline cache
324 assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
325 assert_different_registers(receiver, iCache, t0, t2);
326 cmp_klass(receiver, iCache, t0, t2 /* call-clobbered t2 as a tmp */, L);
327 }
328
|