161 void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
162 assert_different_registers(Rmark, Roop, Rbox);
163
164 Label slow_int, done;
165
166 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
167 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
168
169 if (LockingMode != LM_LIGHTWEIGHT) {
170 // Test first if it is a fast recursive unlock.
171 ld(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
172 cmpdi(CCR0, Rmark, 0);
173 beq(CCR0, done);
174 }
175
176 // Load object.
177 ld(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
178 verify_oop(Roop, FILE_AND_LINE);
179
180 if (LockingMode == LM_LIGHTWEIGHT) {
181 ld(Rmark, oopDesc::mark_offset_in_bytes(), Roop);
182 andi_(R0, Rmark, markWord::monitor_value);
183 bne(CCR0, slow_int);
184 lightweight_unlock(Roop, Rmark, slow_int);
185 } else if (LockingMode == LM_LEGACY) {
186 // Check if it is still a light weight lock, this is true if we see
187 // the stack address of the basicLock in the markWord of the object.
188 cmpxchgd(/*flag=*/CCR0,
189 /*current_value=*/R0,
190 /*compare_value=*/Rbox,
191 /*exchange_value=*/Rmark,
192 /*where=*/Roop,
193 MacroAssembler::MemBarRel,
194 MacroAssembler::cmpxchgx_hint_release_lock(),
195 noreg,
196 &slow_int);
197 }
198 b(done);
199 bind(slow_int);
200 b(slow_case); // far
201
202 // Done
203 bind(done);
316 initialize_body(obj, t1, t2, con_size_in_bytes, hdr_size_in_bytes);
317 }
318
319 if (CURRENT_ENV->dtrace_alloc_probes()) {
320 Unimplemented();
321 // assert(obj == O0, "must be");
322 // call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
323 // relocInfo::runtime_call_type);
324 }
325
326 verify_oop(obj, FILE_AND_LINE);
327 }
328
329
// Fast-path allocation of an array: range-check the length, compute the
// aligned allocation size, allocate (TLAB/eden via try_allocate), then
// initialize the header and zero the body. Branches to slow_case whenever
// the fast path cannot succeed (oversized length or failed allocation).
void C1_MacroAssembler::allocate_array(
  Register obj,                        // result: pointer to array after successful allocation
  Register len,                        // array length
  Register t1,                         // temp register
  Register t2,                         // temp register
  Register t3,                         // temp register
  int      hdr_size,                   // object header size in words
  int      elt_size,                   // element size in bytes
  Register klass,                      // object klass
  Label&   slow_case                   // continuation point if fast allocation fails
) {
  assert_different_registers(obj, len, t1, t2, t3, klass);

  // Determine alignment mask.
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
  int log2_elt_size = exact_log2(elt_size);  // elt_size must be a power of two

  // Check for negative or excessive length.
  // max_length is expressed in elements; the unsigned compare (cmpld) below
  // also rejects negative lengths, which appear as huge unsigned values.
  size_t max_length = max_array_allocation_length >> log2_elt_size;
  if (UseTLAB) {
    // Clamp further so an array that can never fit a TLAB takes the slow path.
    size_t max_tlab = align_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
    if (max_tlab < max_length) { max_length = max_tlab; }
  }
  load_const_optimized(t1, max_length);
  cmpld(CCR0, len, t1);
  bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::greater), slow_case);

  // compute array size
  // note: If 0 <= len <= max_length, len*elt_size + header + alignment is
  // smaller or equal to the largest integer; also, since top is always
  // aligned, we can do the alignment here instead of at the end address
  // computation.
  const Register arr_size = t1;
  Register arr_len_in_bytes = len;
  if (elt_size != 1) {
    // Byte length of the elements: len << log2(elt_size).
    sldi(t1, len, log2_elt_size);
    arr_len_in_bytes = t1;
  }
  addi(arr_size, arr_len_in_bytes, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment.
  clrrdi(arr_size, arr_size, LogMinObjAlignmentInBytes); // Align array size.

  // Allocate space & initialize header.
  try_allocate(obj, arr_size, 0, t2, t3, slow_case);
  initialize_header(obj, klass, len, t2, t3);

  // Initialize body.
  // arr_size (t1) is still live here and is reused to derive the byte count.
  const Register base = t2;
  const Register index = t3;
  addi(base, obj, hdr_size * wordSize); // compute address of first element
  addi(index, arr_size, -(hdr_size * wordSize)); // compute index = number of bytes to clear
  initialize_body(base, index);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    // Dtrace allocation probes are not implemented on this platform yet.
    Unimplemented();
    //assert(obj == O0, "must be");
    //call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
    // relocInfo::runtime_call_type);
  }

  verify_oop(obj, FILE_AND_LINE);
}
391
392
393 #ifndef PRODUCT
394
395 void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
396 verify_oop_addr((RegisterOrConstant)stack_offset, R1_SP, "broken oop in stack slot");
397 }
398
399 void C1_MacroAssembler::verify_not_null_oop(Register r) {
|
161 void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
162 assert_different_registers(Rmark, Roop, Rbox);
163
164 Label slow_int, done;
165
166 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
167 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
168
169 if (LockingMode != LM_LIGHTWEIGHT) {
170 // Test first if it is a fast recursive unlock.
171 ld(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
172 cmpdi(CCR0, Rmark, 0);
173 beq(CCR0, done);
174 }
175
176 // Load object.
177 ld(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
178 verify_oop(Roop, FILE_AND_LINE);
179
180 if (LockingMode == LM_LIGHTWEIGHT) {
181 lightweight_unlock(Roop, Rmark, slow_int);
182 } else if (LockingMode == LM_LEGACY) {
183 // Check if it is still a light weight lock, this is true if we see
184 // the stack address of the basicLock in the markWord of the object.
185 cmpxchgd(/*flag=*/CCR0,
186 /*current_value=*/R0,
187 /*compare_value=*/Rbox,
188 /*exchange_value=*/Rmark,
189 /*where=*/Roop,
190 MacroAssembler::MemBarRel,
191 MacroAssembler::cmpxchgx_hint_release_lock(),
192 noreg,
193 &slow_int);
194 }
195 b(done);
196 bind(slow_int);
197 b(slow_case); // far
198
199 // Done
200 bind(done);
313 initialize_body(obj, t1, t2, con_size_in_bytes, hdr_size_in_bytes);
314 }
315
316 if (CURRENT_ENV->dtrace_alloc_probes()) {
317 Unimplemented();
318 // assert(obj == O0, "must be");
319 // call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
320 // relocInfo::runtime_call_type);
321 }
322
323 verify_oop(obj, FILE_AND_LINE);
324 }
325
326
// Fast-path allocation of an array: range-check the length, compute the
// aligned allocation size, allocate (TLAB/eden via try_allocate), then
// initialize the header and zero the body. Branches to slow_case whenever
// the fast path cannot succeed (oversized length or failed allocation).
void C1_MacroAssembler::allocate_array(
  Register obj,                        // result: pointer to array after successful allocation
  Register len,                        // array length
  Register t1,                         // temp register
  Register t2,                         // temp register
  Register t3,                         // temp register
  int base_offset_in_bytes,            // elements offset in bytes
  int elt_size,                        // element size in bytes
  Register klass,                      // object klass
  Label& slow_case                     // continuation point if fast allocation fails
) {
  assert_different_registers(obj, len, t1, t2, t3, klass);

  // Determine alignment mask.
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
  int log2_elt_size = exact_log2(elt_size);  // elt_size must be a power of two

  // Check for negative or excessive length.
  // max_length is expressed in elements; the unsigned compare (cmpld) below
  // also rejects negative lengths, which appear as huge unsigned values.
  size_t max_length = max_array_allocation_length >> log2_elt_size;
  if (UseTLAB) {
    // Clamp further so an array that can never fit a TLAB takes the slow path.
    size_t max_tlab = align_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
    if (max_tlab < max_length) { max_length = max_tlab; }
  }
  load_const_optimized(t1, max_length);
  cmpld(CCR0, len, t1);
  bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::greater), slow_case);

  // compute array size
  // note: If 0 <= len <= max_length, len*elt_size + header + alignment is
  // smaller or equal to the largest integer; also, since top is always
  // aligned, we can do the alignment here instead of at the end address
  // computation.
  const Register arr_size = t1;
  Register arr_len_in_bytes = len;
  if (elt_size != 1) {
    // Byte length of the elements: len << log2(elt_size).
    sldi(t1, len, log2_elt_size);
    arr_len_in_bytes = t1;
  }
  addi(arr_size, arr_len_in_bytes, base_offset_in_bytes + MinObjAlignmentInBytesMask); // Add space for header & alignment.
  clrrdi(arr_size, arr_size, LogMinObjAlignmentInBytes); // Align array size.

  // Allocate space & initialize header.
  try_allocate(obj, arr_size, 0, t2, t3, slow_case);
  initialize_header(obj, klass, len, t2, t3);

  // Initialize body.
  const Register base = t2;
  const Register index = t3;
  addi(base, obj, base_offset_in_bytes); // compute address of first element
  addi(index, arr_size, -(base_offset_in_bytes)); // compute index = number of bytes to clear

  // Zero first 4 bytes, if start offset is not word aligned.
  // NOTE(review): this presumably handles layouts where the first element
  // starts on a 4-byte (not 8-byte) boundary — confirm against the callers
  // passing base_offset_in_bytes.
  if (!is_aligned(base_offset_in_bytes, BytesPerWord)) {
    assert(is_aligned(base_offset_in_bytes, BytesPerInt), "must be 4-byte aligned");
    // t1 (arr_size) is dead by now, so it can be clobbered with the zero source.
    li(t1, 0);
    stw(t1, 0, base);
    addi(base, base, BytesPerInt);
    // Note: initialize_body will align index down, no need to correct it here.
  }

  initialize_body(base, index);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    // Dtrace allocation probes are not implemented on this platform yet.
    Unimplemented();
    //assert(obj == O0, "must be");
    //call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
    // relocInfo::runtime_call_type);
  }

  verify_oop(obj, FILE_AND_LINE);
}
398
399
400 #ifndef PRODUCT
401
402 void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
403 verify_oop_addr((RegisterOrConstant)stack_offset, R1_SP, "broken oop in stack slot");
404 }
405
406 void C1_MacroAssembler::verify_not_null_oop(Register r) {
|