163 cmpxchgptr(disp_hdr, hdr, obj, rscratch2, done, &slow_case);
164 }
165 // done
166 bind(done);
167 }
168 decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
169 }
170
171
172 // Defines obj, preserves var_size_in_bytes
173 void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
174 if (UseTLAB) {
175 tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
176 } else {
177 b(slow_case);
178 }
179 }
180
// Writes the object header: the prototype mark word, the klass field
// (narrow or full-width), and — for arrays — the length field.
// t1 is clobbered; klass and len are preserved.
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  // Install the prototype mark word (unlocked, unhashed).
  // This assumes that all prototype bits fit in an int32_t
  mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
  str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseCompressedClassPointers) { // Take care not to kill klass
    // Encode into t1 and store the narrow klass as a 32-bit word.
    encode_klass_not_null(t1, klass);
    strw(t1, Address(obj, oopDesc::klass_offset_in_bytes()));
  } else {
    str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
  }

  if (len->is_valid()) {
    // Array: store the element count into the length field.
    strw(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  } else if (UseCompressedClassPointers) {
    // Non-array with a narrow klass: zero the 32-bit gap that follows
    // the compressed klass so the whole header word is initialized.
    store_klass_gap(obj, zr);
  }
}
200
// preserves obj, destroys len_in_bytes
//
// Scratch registers: t1 = r10, t2 = r11
//
// Zeroes the body of a freshly allocated object: the len_in_bytes bytes
// starting at obj, minus the hdr_size_in_bytes header. Bails out the
// current compilation if zero_words() cannot emit its trampoline stub.
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1, Register t2) {
  assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
  assert(t1 == r10 && t2 == r11, "must be");

  Label done;

  // len_in_bytes is positive and ptr sized
  subs(len_in_bytes, len_in_bytes, hdr_size_in_bytes);
  // Nothing to clear when the object is all header.
  br(Assembler::EQ, done);

  // zero_words() takes ptr in r10 and count in words in r11
  mov(rscratch1, len_in_bytes);
  lea(t1, Address(obj, hdr_size_in_bytes));
  lsr(t2, rscratch1, LogBytesPerWord);
  address tpc = zero_words(t1, t2);

  bind(done);
  // tpc is null when zero_words() had no code-buffer space left for the
  // trampoline stub it may need; abandon this compilation in that case.
  if (tpc == nullptr) {
    Compilation::current()->bailout("no space for trampoline stub");
  }
}
226
227
228 void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
229 assert_different_registers(obj, t1, t2); // XXX really?
230 assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
231
232 try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);
233
234 initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2, UseTLAB);
254 } else if (con_size_in_bytes > hdr_size_in_bytes) {
255 con_size_in_bytes -= hdr_size_in_bytes;
256 lea(t1, Address(obj, hdr_size_in_bytes));
257 address tpc = zero_words(t1, con_size_in_bytes / BytesPerWord);
258 if (tpc == nullptr) {
259 Compilation::current()->bailout("no space for trampoline stub");
260 return;
261 }
262 }
263 }
264
265 membar(StoreStore);
266
267 if (CURRENT_ENV->dtrace_alloc_probes()) {
268 assert(obj == r0, "must be");
269 far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
270 }
271
272 verify_oop(obj);
273 }
// Allocates and initializes an array object.
//   obj:         result — the newly allocated array
//   len:         element count
//   t1, t2:      temps (clobbered; t2 is reused as arr_size)
//   header_size: array header size in words
//   f:           log2 of the element size — len is scaled by (1 << f)
//   klass:       the array klass
//   slow_case:   target when the length is out of range or TLAB
//                allocation fails
void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, int f, Register klass, Label& slow_case) {
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  // (unsigned HS compare: a negative length appears as a huge unsigned value)
  mov(rscratch1, (int32_t)max_array_allocation_length);
  cmp(len, rscratch1);
  br(Assembler::HS, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
  // arr_size = align_up(header + len * element_size, MinObjAlignmentInBytes)
  mov(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  add(arr_size, arr_size, len, ext::uxtw, f);
  andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

  // clear rest of allocated space
  initialize_body(obj, arr_size, header_size * BytesPerWord, t1, t2);
  // initialize_body() may bail out the compilation; stop emitting if so.
  if (Compilation::current()->bailed_out()) {
    return;
  }

  // Order the initializing stores before any subsequent store that
  // publishes the new object.
  membar(StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == r0, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}
310
311
// Emits the inline-cache check at a method's verified entry: compares the
// receiver's klass against the expected klass in iCache. Only the compare
// is emitted here; the caller emits the branch on its result.
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit null check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");

  cmp_klass(receiver, iCache, rscratch1);
}
320
321
// Emits the method prologue: stack-overflow bang, frame construction, and
// the GC's nmethod entry barrier.
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before creating a frame.
  generate_stack_overflow_check(bang_size_in_bytes);
  MacroAssembler::build_frame(framesize);

  // Insert nmethod entry barrier into frame.
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
}
333
// Tears down the activation frame created by build_frame().
void C1_MacroAssembler::remove_frame(int framesize) {
  MacroAssembler::remove_frame(framesize);
}
|
163 cmpxchgptr(disp_hdr, hdr, obj, rscratch2, done, &slow_case);
164 }
165 // done
166 bind(done);
167 }
168 decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
169 }
170
171
172 // Defines obj, preserves var_size_in_bytes
173 void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
174 if (UseTLAB) {
175 tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
176 } else {
177 b(slow_case);
178 }
179 }
180
// Writes the object header: mark word, klass (folded into the mark word
// with compact headers), and — for arrays — the length field.
// t1 is clobbered; klass and len are preserved.
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  if (UseCompactObjectHeaders) {
    // Compact headers: the klass's prototype header already contains the
    // klass bits, so one store initializes the whole header.
    ldr(t1, Address(klass, Klass::prototype_header_offset()));
    str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
  } else {
    // This assumes that all prototype bits fit in an int32_t
    mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
    str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

    if (UseCompressedClassPointers) { // Take care not to kill klass
      // Encode into t1 and store the narrow klass as a 32-bit word.
      encode_klass_not_null(t1, klass);
      strw(t1, Address(obj, oopDesc::klass_offset_in_bytes()));
    } else {
      str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
  }

  if (len->is_valid()) {
    // Array: store the element count into the length field.
    strw(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
    // Narrow-klass, non-compact layout: zero the 32-bit gap after the
    // compressed klass. With compact headers there is no separate klass
    // field and hence no gap.
    store_klass_gap(obj, zr);
  }
}
205
// preserves obj, destroys len_in_bytes
//
// Scratch registers: t1 = r10, t2 = r11
//
// Zeroes the body of a freshly allocated object: the len_in_bytes bytes
// starting at obj, minus the hdr_size_in_bytes header. The header offset
// need not be word-aligned (compact-header array layouts). Bails out the
// current compilation if zero_words() cannot emit its trampoline stub.
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1, Register t2) {
  assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
  assert(t1 == r10 && t2 == r11, "must be");

  Label done;

  // len_in_bytes is positive and ptr sized
  subs(len_in_bytes, len_in_bytes, hdr_size_in_bytes);
  // Nothing to clear when the object is all header.
  br(Assembler::EQ, done);

  // Zero first 4 bytes, if start offset is not word aligned.
  if (!is_aligned(hdr_size_in_bytes, BytesPerWord)) {
    strw(zr, Address(obj, hdr_size_in_bytes));
    hdr_size_in_bytes += BytesPerInt;
  }

  // Note: len_in_bytes is deliberately NOT reduced by the 4 bytes just
  // zeroed — the lsr below rounds the word count down, which discards
  // exactly that half word (the object end is word-aligned, so the
  // remainder after the shift is those 4 bytes).
  // zero_words() takes ptr in r10 and count in words in r11
  mov(rscratch1, len_in_bytes);
  lea(t1, Address(obj, hdr_size_in_bytes));
  lsr(t2, rscratch1, LogBytesPerWord);
  address tpc = zero_words(t1, t2);

  bind(done);
  // tpc is null when zero_words() had no code-buffer space left for the
  // trampoline stub it may need; abandon this compilation in that case.
  if (tpc == nullptr) {
    Compilation::current()->bailout("no space for trampoline stub");
  }
}
237
238
239 void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
240 assert_different_registers(obj, t1, t2); // XXX really?
241 assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
242
243 try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);
244
245 initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2, UseTLAB);
265 } else if (con_size_in_bytes > hdr_size_in_bytes) {
266 con_size_in_bytes -= hdr_size_in_bytes;
267 lea(t1, Address(obj, hdr_size_in_bytes));
268 address tpc = zero_words(t1, con_size_in_bytes / BytesPerWord);
269 if (tpc == nullptr) {
270 Compilation::current()->bailout("no space for trampoline stub");
271 return;
272 }
273 }
274 }
275
276 membar(StoreStore);
277
278 if (CURRENT_ENV->dtrace_alloc_probes()) {
279 assert(obj == r0, "must be");
280 far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
281 }
282
283 verify_oop(obj);
284 }
// Allocates and initializes an array object.
//   obj:                 result — the newly allocated array
//   len:                 element count
//   t1, t2:              temps (clobbered; t2 is reused as arr_size)
//   base_offset_in_bytes: offset of element 0 (may be non-word-aligned
//                        with compact headers; initialize_body copes)
//   f:                   log2 of the element size — len is scaled by (1 << f)
//   klass:               the array klass
//   slow_case:           target when the length is out of range or TLAB
//                        allocation fails
void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case) {
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  // (unsigned HS compare: a negative length appears as a huge unsigned value)
  mov(rscratch1, (int32_t)max_array_allocation_length);
  cmp(len, rscratch1);
  br(Assembler::HS, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
  // arr_size = align_up(base_offset + len * element_size, MinObjAlignmentInBytes)
  mov(arr_size, (int32_t)base_offset_in_bytes + MinObjAlignmentInBytesMask);
  add(arr_size, arr_size, len, ext::uxtw, f);
  andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

  // clear rest of allocated space
  initialize_body(obj, arr_size, base_offset_in_bytes, t1, t2);
  // initialize_body() may bail out the compilation; stop emitting if so.
  if (Compilation::current()->bailed_out()) {
    return;
  }

  // Order the initializing stores before any subsequent store that
  // publishes the new object.
  membar(StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == r0, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}
321
322
// Emits the inline-cache check at a method's verified entry: compares the
// receiver's klass against the expected klass in iCache. Only the compare
// is emitted here; the caller emits the branch on its result.
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit null check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  if (UseCompactObjectHeaders) {
    // With compact headers the klass bits are read from the mark word, so
    // the implicit null check happens at the mark offset instead.
    assert(!MacroAssembler::needs_explicit_null_check(oopDesc::mark_offset_in_bytes()), "must add explicit null check");
  } else {
    assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  }

  cmp_klass(receiver, iCache, rscratch1);
}
335
336
// Emits the method prologue: stack-overflow bang, frame construction, and
// the GC's nmethod entry barrier.
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before creating a frame.
  generate_stack_overflow_check(bang_size_in_bytes);
  MacroAssembler::build_frame(framesize);

  // Insert nmethod entry barrier into frame.
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
}
348
// Tears down the activation frame created by build_frame().
void C1_MacroAssembler::remove_frame(int framesize) {
  MacroAssembler::remove_frame(framesize);
}
|