--- old/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp

 207   ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
 208   ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
 209   orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
 210   cmp(rscratch1, zr); // Sets flags for result
 211   cbnz(rscratch1, cont);
 212   // need a release store here
 213   lea(tmp, Address(tmp, ObjectMonitor::owner_offset()));
 214   stlr(zr, tmp); // set unowned
 215 
 216   bind(cont);
 217   // flag == EQ indicates success
 218   // flag == NE indicates failure
 219   br(Assembler::NE, no_count);
 220 
 221   bind(count);
 222   decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
 223 
 224   bind(no_count);
 225 }
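
The epilogue above (old lines 207-224) is the inflated-monitor fast exit: if either entry queue (EntryList, cxq) is non-empty, a successor may need waking and the code reaches cont with NE already set; only when both are empty is ownership dropped, and the stlr (store-release) guarantees every write made inside the critical section is visible before the monitor can be observed as unowned. A minimal C++ sketch of that protocol, with hypothetical field names; the real ObjectMonitor carries more state and the slow path runs in the runtime:

#include <atomic>

struct MonitorSketch {
  std::atomic<void*> owner{nullptr};
  std::atomic<void*> entry_list{nullptr};  // stand-in for EntryList
  std::atomic<void*> cxq{nullptr};         // stand-in for cxq
};

// Returns true on fast-path success (flag == EQ above); false takes the
// slow path (flag == NE) so the runtime can wake a successor.
bool fast_exit(MonitorSketch* m) {
  if (m->entry_list.load(std::memory_order_relaxed) != nullptr ||
      m->cxq.load(std::memory_order_relaxed) != nullptr) {
    return false;                          // orr/cmp/cbnz: queues not empty
  }
  // stlr zr, [owner]: the release store publishes critical-section writes
  // before the monitor can be seen as unowned.
  m->owner.store(nullptr, std::memory_order_release);
  return true;
}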
 226 
 227 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1,
 228                                               Register t2, Register t3) {
 229   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 230   assert_different_registers(obj, t1, t2, t3);
 231 
 232   // Handle inflated monitor.
 233   Label inflated;
 234   // Finish fast lock successfully. MUST be branched to with flag == EQ
 235   Label locked;
 236   // Finish fast lock unsuccessfully. MUST be branched to with flag == NE
 237   Label slow_path;
 238 
 239   if (DiagnoseSyncOnValueBasedClasses != 0) {
 240     load_klass(t1, obj);
 241     ldrw(t1, Address(t1, Klass::access_flags_offset()));
 242     tstw(t1, JVM_ACC_IS_VALUE_BASED_CLASS);
 243     br(Assembler::NE, slow_path);
 244   }
 245 
 246   const Register t1_mark = t1;
 247 
 248   { // Lightweight locking
 249 
 250     // Push lock to the lock stack and finish successfully. MUST be branched to with flag == EQ
 251     Label push;
 252 
 253     const Register t2_top = t2;
 254     const Register t3_t = t3;
 255 
 256     // Check if lock-stack is full.
 257     ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
 258     cmpw(t2_top, (unsigned)LockStack::end_offset() - 1);
 259     br(Assembler::GT, slow_path);
 260 
 261     // Check if recursive.
 262     subw(t3_t, t2_top, oopSize);
 263     ldr(t3_t, Address(rthread, t3_t));
 264     cmp(obj, t3_t);
 265     br(Assembler::EQ, push);
 266 
 267     // Relaxed normal load to check for monitor. Optimization for monitor case.
 268     ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
 269     tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
 270 
 271     // Not inflated
 272     assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid a lea");
 273 
 274     // Try to lock. Transition lock-bits 0b01 => 0b00
 275     orr(t1_mark, t1_mark, markWord::unlocked_value);
 276     eor(t3_t, t1_mark, markWord::unlocked_value);
 277     cmpxchg(/*addr*/ obj, /*expected*/ t1_mark, /*new*/ t3_t, Assembler::xword,
 278             /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
 279     br(Assembler::NE, slow_path);
 280 
 281     bind(push);
 282     // After successful lock, push object on lock-stack.
 283     str(obj, Address(rthread, t2_top));
 284     addw(t2_top, t2_top, oopSize);
 285     strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
 286     b(locked);
 287   }
 288 
 289   { // Handle inflated monitor.
 290     bind(inflated);
 291 
 292     // mark contains the tagged ObjectMonitor*.
 293     const Register t1_tagged_monitor = t1_mark;
 294     const uintptr_t monitor_tag = markWord::monitor_value;
 295     const Register t2_owner_addr = t2;
 296     const Register t3_owner = t3;
 297 
 298     // Compute owner address.
 299     lea(t2_owner_addr, Address(t1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag)));
 300 
 301     // CAS owner (null => current thread).
 302     cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true,
 303             /*release*/ false, /*weak*/ false, t3_owner);
 304     br(Assembler::EQ, locked);
 305 
 306     // Check if recursive.
 307     cmp(t3_owner, rthread);
 308     br(Assembler::NE, slow_path);
 309 
 310     // Recursive.
 311     increment(Address(t1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1);
 312   }
 313 
 314   bind(locked);
 315   increment(Address(rthread, JavaThread::held_monitor_count_offset()));
 316 
 317 #ifdef ASSERT
 318   // Check that locked label is reached with Flags == EQ.
 319   Label flag_correct;
 320   br(Assembler::EQ, flag_correct);
 321   stop("Fast Lock Flag != EQ");
 322 #endif
 323 
 324   bind(slow_path);
 325 #ifdef ASSERT
 326   // Check that slow_path label is reached with Flags == NE.
 327   br(Assembler::NE, flag_correct);
 328   stop("Fast Lock Flag != NE");
 329   bind(flag_correct);
 330 #endif
 331   // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
 332 }
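
Taken as a whole, fast_lock_lightweight implements the LM_LIGHTWEIGHT fast path: bail out if the per-thread lock-stack is full, push again if the lock is already held recursively (top of stack equals obj), otherwise CAS the mark-word lock bits from 0b01 (unlocked) to 0b00 (locked) and push. A C++ sketch under illustrative types; the inflated case, which the assembly handles inline, is deferred to the slow path here:

#include <atomic>
#include <cstdint>

constexpr uintptr_t MARK_UNLOCKED = 0b01;  // lock bits 0b01: unlocked
constexpr uintptr_t MARK_MONITOR  = 0b10;  // lock bits 0b10: inflated monitor

struct ObjSketch { std::atomic<uintptr_t> mark; };

struct LockStackSketch {               // stand-in for the JavaThread lock-stack
  static const int CAP = 8;            // stand-in for LockStack::end_offset()
  ObjSketch* slots[CAP] = {};
  int top = 0;                         // like the lock_stack_top_offset slot
};

// Returns true on fast-path success (flag == EQ); false means slow path.
bool fast_lock(ObjSketch* obj, LockStackSketch* ls) {
  if (ls->top == LockStackSketch::CAP) return false;   // lock-stack full
  if (ls->top > 0 && ls->slots[ls->top - 1] == obj) {  // recursive
    ls->slots[ls->top++] = obj;                        // push again
    return true;
  }
  uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
  if (mark & MARK_MONITOR) return false;               // inflated (bind(inflated))
  // Try the lock-bit transition 0b01 -> 0b00 (the orr/eor/cmpxchg above).
  uintptr_t expected = mark | MARK_UNLOCKED;
  uintptr_t desired  = expected ^ MARK_UNLOCKED;
  if (!obj->mark.compare_exchange_strong(expected, desired,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed)) {
    return false;
  }
  ls->slots[ls->top++] = obj;                          // bind(push)
  return true;
}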
 333 
 334 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Register t2,
 335                                                 Register t3) {
 336   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 337   assert_different_registers(obj, t1, t2, t3);
 338 
 339   // Handle inflated monitor.
 340   Label inflated, inflated_load_monitor;
 341   // Finish fast unlock successfully. MUST be branched to with flag == EQ
 342   Label unlocked;
 343   // Finish fast unlock unsuccessfully. MUST be branched to with flag == NE
 344   Label slow_path;
 345 
 346   const Register t1_mark = t1;
 347   const Register t2_top = t2;
 348   const Register t3_t = t3;
 349 
 350   { // Lightweight unlock
 351 
 352     // Check if obj is top of lock-stack.
 353     ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
 354     subw(t2_top, t2_top, oopSize);
 355     ldr(t3_t, Address(rthread, t2_top));
 356     cmp(obj, t3_t);
 357     // Top of lock stack was not obj. Must be monitor.
 358     br(Assembler::NE, inflated_load_monitor);
 359 
 360     // Pop lock-stack.
 361     DEBUG_ONLY(str(zr, Address(rthread, t2_top));)
 362     strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
 363 
 364     // Check if recursive.
 365     subw(t3_t, t2_top, oopSize);
 366     ldr(t3_t, Address(rthread, t3_t));
 367     cmp(obj, t3_t);
 368     br(Assembler::EQ, unlocked);
 369 
 370     // Not recursive.
 371     // Load Mark.
 372     ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
 373 
 374     // Check header for monitor (0b10).
 375     tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
 376 
 377     // Try to unlock. Transition lock bits 0b00 => 0b01
 378     assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
 379     orr(t3_t, t1_mark, markWord::unlocked_value);
 380     cmpxchg(/*addr*/ obj, /*expected*/ t1_mark, /*new*/ t3_t, Assembler::xword,
 381             /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
 382     br(Assembler::EQ, unlocked);
 383 
 384     // Compare and exchange failed.
 385     // Restore lock-stack and handle the unlock in runtime.
 386     DEBUG_ONLY(str(obj, Address(rthread, t2_top));)
 387     addw(t2_top, t2_top, oopSize);
 388     str(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
 389     b(slow_path);
 390   }
 391 
 392 
 393   { // Handle inflated monitor.
 394     bind(inflated_load_monitor);
 395     ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
 396 #ifdef ASSERT
 397     tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
 398     stop("Fast Unlock not monitor");
 399 #endif
 400 
 401     bind(inflated);
 402 
 403 #ifdef ASSERT
 404     Label check_done;
 405     subw(t2_top, t2_top, oopSize);
 406     cmpw(t2_top, in_bytes(JavaThread::lock_stack_base_offset()));
 407     br(Assembler::LT, check_done);
 408     ldr(t3_t, Address(rthread, t2_top));
 409     cmp(obj, t3_t);
 410     br(Assembler::NE, inflated);
 411     stop("Fast Unlock lock on stack");
 412     bind(check_done);
 413 #endif
 414 
 415     // mark contains the tagged ObjectMonitor*.
 416     const Register t1_monitor = t1_mark;
 417     const uintptr_t monitor_tag = markWord::monitor_value;
 418 
 419     // Untag the monitor.
 420     sub(t1_monitor, t1_mark, monitor_tag);
 421 
 422     const Register t2_recursions = t2;
 423     Label not_recursive;
 424 
 425     // Check if recursive.
 426     ldr(t2_recursions, Address(t1_monitor, ObjectMonitor::recursions_offset()));
 427     cbz(t2_recursions, not_recursive);
 428 
 429     // Recursive unlock.
 430     sub(t2_recursions, t2_recursions, 1u);
 431     str(t2_recursions, Address(t1_monitor, ObjectMonitor::recursions_offset()));
 432     // Set flag == EQ
 433     cmp(t2_recursions, t2_recursions);
 434     b(unlocked);
 435 
 436     bind(not_recursive);
 437 
 438     Label release;
 439     const Register t2_owner_addr = t2;
 440 

2477       sve_and(vtmp, T, min_jlong);
2478       sve_orr(vtmp, T, jlong_cast(1.0));
2479       break;
2480     default:
2481       assert(false, "unsupported");
2482       ShouldNotReachHere();
2483     }
2484     sve_sel(dst, T, pgtmp, vtmp, src); // Select either from src or vtmp based on the predicate register pgtmp
2485                                        // Result in dst
2486 }
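
The two SVE instructions kept at the top of this hunk are the tail of a vector signum: ANDing each lane with min_jlong keeps only the sign bit, ORing in the bit pattern of 1.0 turns that into +/-1.0, and sve_sel then merges those lanes with the original src lanes under the predicate pgtmp. A scalar C++ equivalent for one double lane (assuming T covers 64-bit lanes):

#include <cstdint>
#include <cstring>
#include <cstdio>

// copysign(1.0, v) via the same bit manipulation as sve_and/sve_orr above.
double copysign_one(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);
  bits &= UINT64_C(0x8000000000000000);          // min_jlong: isolate the sign bit
  uint64_t one_bits;
  double one = 1.0;
  std::memcpy(&one_bits, &one, sizeof one_bits); // jlong_cast(1.0)
  bits |= one_bits;                              // +/-1.0 carrying v's sign
  double r;
  std::memcpy(&r, &bits, sizeof r);
  return r;
}

int main() {
  std::printf("%.1f %.1f\n", copysign_one(-3.5), copysign_one(2.0)); // -1.0 1.0
}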
2487 
2488 bool C2_MacroAssembler::in_scratch_emit_size() {
2489   if (ciEnv::current()->task() != nullptr) {
2490     PhaseOutput* phase_output = Compile::current()->output();
2491     if (phase_output != nullptr && phase_output->in_scratch_emit_size()) {
2492       return true;
2493     }
2494   }
2495   return MacroAssembler::in_scratch_emit_size();
2496 }
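
Back in fast_unlock_lightweight above (its tail is elided from this hunk): the fast path pops obj off the lock-stack, returns success immediately for a recursive exit, and otherwise CASes the lock bits back from 0b00 to 0b01 with release semantics, restoring the lock-stack when the fast path cannot finish. A sketch reusing ObjSketch, LockStackSketch and the MARK_* constants from the fast-lock sketch above; both inflated-monitor outcomes are collapsed to the slow path here:

bool fast_unlock(ObjSketch* obj, LockStackSketch* ls) {
  if (ls->top == 0 || ls->slots[ls->top - 1] != obj) {
    return false;                     // not top of lock-stack: monitor path
  }
  ls->slots[--ls->top] = nullptr;     // pop; zeroing the slot is debug-only
  if (ls->top > 0 && ls->slots[ls->top - 1] == obj) {
    return true;                      // recursive exit: outer entry remains
  }
  uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
  if (mark & MARK_MONITOR) {
    ls->slots[ls->top++] = obj;       // restore; cf. push_and_slow_path below
    return false;
  }
  // Try the lock-bit transition 0b00 -> 0b01 with release semantics.
  uintptr_t expected = mark;
  uintptr_t desired  = mark | MARK_UNLOCKED;
  if (obj->mark.compare_exchange_strong(expected, desired,
                                        std::memory_order_release,
                                        std::memory_order_relaxed)) {
    return true;
  }
  ls->slots[ls->top++] = obj;         // CAS failed: restore stack, slow path
  return false;
}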

+++ new/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp

 207   ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
 208   ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
 209   orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
 210   cmp(rscratch1, zr); // Sets flags for result
 211   cbnz(rscratch1, cont);
 212   // need a release store here
 213   lea(tmp, Address(tmp, ObjectMonitor::owner_offset()));
 214   stlr(zr, tmp); // set unowned
 215 
 216   bind(cont);
 217   // flag == EQ indicates success
 218   // flag == NE indicates failure
 219   br(Assembler::NE, no_count);
 220 
 221   bind(count);
 222   decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
 223 
 224   bind(no_count);
 225 }
 226 
 227 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register t1,
 228                                               Register t2, Register t3) {
 229   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 230   assert_different_registers(obj, box, t1, t2, t3);
 231 
 232   // Handle inflated monitor.
 233   Label inflated;
 234   // Finish fast lock successfully. MUST be branched to with flag == EQ
 235   Label locked;
 236   // Finish fast lock unsuccessfully. MUST be branched to with flag == NE
 237   Label slow_path;
 238 
 239   if (UseObjectMonitorTable) {
 240     // Clear cache in case fast locking succeeds.
 241     str(zr, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
 242   }
 243 
 244   if (DiagnoseSyncOnValueBasedClasses != 0) {
 245     load_klass(t1, obj);
 246     ldrw(t1, Address(t1, Klass::access_flags_offset()));
 247     tstw(t1, JVM_ACC_IS_VALUE_BASED_CLASS);
 248     br(Assembler::NE, slow_path);
 249   }
 250 
 251   const Register t1_mark = t1;
 252   const Register t3_t = t3;
 253 
 254   { // Lightweight locking
 255 
 256     // Push lock to the lock stack and finish successfully. MUST be branched to with flag == EQ
 257     Label push;
 258 
 259     const Register t2_top = t2;
 260 
 261     // Check if lock-stack is full.
 262     ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
 263     cmpw(t2_top, (unsigned)LockStack::end_offset() - 1);
 264     br(Assembler::GT, slow_path);
 265 
 266     // Check if recursive.
 267     subw(t3_t, t2_top, oopSize);
 268     ldr(t3_t, Address(rthread, t3_t));
 269     cmp(obj, t3_t);
 270     br(Assembler::EQ, push);
 271 
 272     // Relaxed normal load to check for monitor. Optimization for monitor case.
 273     ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
 274     tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
 275 
 276     // Not inflated
 277     assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid a lea");
 278 
 279     // Try to lock. Transition lock-bits 0b01 => 0b00
 280     orr(t1_mark, t1_mark, markWord::unlocked_value);
 281     eor(t3_t, t1_mark, markWord::unlocked_value);
 282     cmpxchg(/*addr*/ obj, /*expected*/ t1_mark, /*new*/ t3_t, Assembler::xword,
 283             /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
 284     br(Assembler::NE, slow_path);
 285 
 286     bind(push);
 287     // After successful lock, push object on lock-stack.
 288     str(obj, Address(rthread, t2_top));
 289     addw(t2_top, t2_top, oopSize);
 290     strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
 291     b(locked);
 292   }
 293 
 294   { // Handle inflated monitor.
 295     bind(inflated);
 296 
 297     const Register t1_monitor = t1;
 298 
 299     if (!UseObjectMonitorTable) {
 300       assert(t1_monitor == t1_mark, "should be the same here");
 301     } else {
 302       Label monitor_found;
 303 
 304       // Load cache address
 305       lea(t3_t, Address(rthread, JavaThread::om_cache_oops_offset()));
 306 
 307       const int num_unrolled = 2;
 308       for (int i = 0; i < num_unrolled; i++) {
 309         ldr(t1, Address(t3_t));
 310         cmp(obj, t1);
 311         br(Assembler::EQ, monitor_found);
 312         if (i + 1 != num_unrolled) {
 313           increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
 314         }
 315       }
 316 
 317       // Loop after unrolling, advance iterator.
 318       increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
 319 
 320       Label loop;
 321 
 322       // Search for obj in cache.
 323       bind(loop);
 324 
 325       // Check for match.
 326       ldr(t1, Address(t3_t));
 327       cmp(obj, t1);
 328       br(Assembler::EQ, monitor_found);
 329 
 330       // Search until null encountered, guaranteed _null_sentinel at end.
 331       increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
 332       cbnz(t1, loop);
 333       // Cache miss. NE is still set from the cmp above; cbnz does not set flags.
 334       b(slow_path);
 335 
 336       bind(monitor_found);
 337       ldr(t1_monitor, Address(t3_t, OMCache::oop_to_monitor_difference()));
 338     }
 339 
 340     const Register t2_owner_addr = t2;
 341     const Register t3_owner = t3;
 342     const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast<int>(markWord::monitor_value));
 343     const Address owner_address{t1_monitor, ObjectMonitor::owner_offset() - monitor_tag};
 344     const Address recursions_address{t1_monitor, ObjectMonitor::recursions_offset() - monitor_tag};
 345 
 346     Label monitor_locked;
 347 
 348     // Compute owner address.
 349     lea(t2_owner_addr, owner_address);
 350 
 351     // CAS owner (null => current thread).
 352     cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true,
 353             /*release*/ false, /*weak*/ false, t3_owner);
 354     br(Assembler::EQ, monitor_locked);
 355 
 356     // Check if recursive.
 357     cmp(t3_owner, rthread);
 358     br(Assembler::NE, slow_path);
 359 
 360     // Recursive.
 361     increment(recursions_address, 1);
 362 
 363     bind(monitor_locked);
 364     if (UseObjectMonitorTable) {
 365       str(t1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
 366     }
 367   }
 368 
 369   bind(locked);
 370   increment(Address(rthread, JavaThread::held_monitor_count_offset()));
 371 
 372 #ifdef ASSERT
 373   // Check that locked label is reached with Flags == EQ.
 374   Label flag_correct;
 375   br(Assembler::EQ, flag_correct);
 376   stop("Fast Lock Flag != EQ");
 377 #endif
 378 
 379   bind(slow_path);
 380 #ifdef ASSERT
 381   // Check that slow_path label is reached with Flags == NE.
 382   br(Assembler::NE, flag_correct);
 383   stop("Fast Lock Flag != NE");
 384   bind(flag_correct);
 385 #endif
 386   // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
 387 }
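
New in this version: with UseObjectMonitorTable the mark word no longer carries the ObjectMonitor*, so the inflated path first probes the per-thread OMCache (om_cache_oops_offset), an array of oop/monitor pairs terminated by a null sentinel; the first two probes are unrolled, the rest run in the loop, and a hit is written back into the BasicLock box so the matching unlock can reload it. A C++ sketch of the probe with illustrative names:

struct OMCacheEntrySketch {
  void* oop_value;   // cached object reference
  void* monitor;     // associated ObjectMonitor*, kept opaque here
};

// The array ends with a null-oop sentinel (_null_sentinel), so the scan always
// terminates; a miss returns nullptr and the caller takes the slow path.
void* om_cache_lookup(const OMCacheEntrySketch* cache, void* obj) {
  for (const OMCacheEntrySketch* e = cache; ; ++e) {
    if (e->oop_value == obj) return e->monitor;   // hit: monitor_found
    if (e->oop_value == nullptr) return nullptr;  // sentinel: cache miss
  }
}

On the unlock side (below), the box cache is reloaded and any value below alignof(ObjectMonitor*) is rejected as a null or invalid pointer, which conveniently leaves NE set for the slow path.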
 388 
 389 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register t1,
 390                                                 Register t2, Register t3) {
 391   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 392   assert_different_registers(obj, box, t1, t2, t3);
 393 
 394   // Handle inflated monitor.
 395   Label inflated, inflated_load_mark;
 396   // Finish fast unlock successfully. MUST be branched to with flag == EQ
 397   Label unlocked;
 398   // Finish fast unlock unsuccessfully. MUST be branched to with flag == NE
 399   Label slow_path;
 400 
 401   const Register t1_mark = t1;
 402   const Register t2_top = t2;
 403   const Register t3_t = t3;
 404 
 405   { // Lightweight unlock
 406 
 407     Label push_and_slow_path;
 408 
 409     // Check if obj is top of lock-stack.
 410     ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
 411     subw(t2_top, t2_top, oopSize);
 412     ldr(t3_t, Address(rthread, t2_top));
 413     cmp(obj, t3_t);
 414     // Top of lock stack was not obj. Must be monitor.
 415     br(Assembler::NE, inflated_load_mark);
 416 
 417     // Pop lock-stack.
 418     DEBUG_ONLY(str(zr, Address(rthread, t2_top));)
 419     strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
 420 
 421     // Check if recursive.
 422     subw(t3_t, t2_top, oopSize);
 423     ldr(t3_t, Address(rthread, t3_t));
 424     cmp(obj, t3_t);
 425     br(Assembler::EQ, unlocked);
 426 
 427     // Not recursive.
 428     // Load Mark.
 429     ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
 430 
 431     // Check header for monitor (0b10).
 432     // Because we got here by popping (meaning obj was pushed when it was
 433     // locked), there is no monitor cached in the box. Push obj back so
 434     // that the runtime can fix any potential anonymous owner.
 435     tbnz(t1_mark, exact_log2(markWord::monitor_value), UseObjectMonitorTable ? push_and_slow_path : inflated);
 436 
 437     // Try to unlock. Transition lock bits 0b00 => 0b01
 438     assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
 439     orr(t3_t, t1_mark, markWord::unlocked_value);
 440     cmpxchg(/*addr*/ obj, /*expected*/ t1_mark, /*new*/ t3_t, Assembler::xword,
 441             /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
 442     br(Assembler::EQ, unlocked);
 443 
 444     bind(push_and_slow_path);
 445     // Compare and exchange failed.
 446     // Restore lock-stack and handle the unlock in runtime.
 447     DEBUG_ONLY(str(obj, Address(rthread, t2_top));)
 448     addw(t2_top, t2_top, oopSize);
 449     str(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
 450     b(slow_path);
 451   }
 452 
 453 
 454   { // Handle inflated monitor.
 455     bind(inflated_load_mark);
 456     ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
 457 #ifdef ASSERT
 458     tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
 459     stop("Fast Unlock not monitor");
 460 #endif
 461 
 462     bind(inflated);
 463 
 464 #ifdef ASSERT
 465     Label check_done;
 466     subw(t2_top, t2_top, oopSize);
 467     cmpw(t2_top, in_bytes(JavaThread::lock_stack_base_offset()));
 468     br(Assembler::LT, check_done);
 469     ldr(t3_t, Address(rthread, t2_top));
 470     cmp(obj, t3_t);
 471     br(Assembler::NE, inflated);
 472     stop("Fast Unlock lock on stack");
 473     bind(check_done);
 474 #endif
 475 
 476     const Register t1_monitor = t1;
 477 
 478     if (!UseObjectMonitorTable) {
 479       assert(t1_monitor == t1_mark, "should be the same here");
 480 
 481       // Untag the monitor.
 482       add(t1_monitor, t1_mark, -(int)markWord::monitor_value);
 483     } else {
 484       ldr(t1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
 485       // null check with Flags == NE, no valid pointer below alignof(ObjectMonitor*)
 486       cmp(t1_monitor, checked_cast<uint8_t>(alignof(ObjectMonitor*)));
 487       br(Assembler::LO, slow_path);
 488     }
 489 
 490     const Register t2_recursions = t2;
 491     Label not_recursive;
 492 
 493     // Check if recursive.
 494     ldr(t2_recursions, Address(t1_monitor, ObjectMonitor::recursions_offset()));
 495     cbz(t2_recursions, not_recursive);
 496 
 497     // Recursive unlock.
 498     sub(t2_recursions, t2_recursions, 1u);
 499     str(t2_recursions, Address(t1_monitor, ObjectMonitor::recursions_offset()));
 500     // Set flag == EQ
 501     cmp(t2_recursions, t2_recursions);
 502     b(unlocked);
 503 
 504     bind(not_recursive);
 505 
 506     Label release;
 507     const Register t2_owner_addr = t2;
 508 

2545       sve_and(vtmp, T, min_jlong);
2546       sve_orr(vtmp, T, jlong_cast(1.0));
2547       break;
2548     default:
2549       assert(false, "unsupported");
2550       ShouldNotReachHere();
2551     }
2552     sve_sel(dst, T, pgtmp, vtmp, src); // Select either from src or vtmp based on the predicate register pgtmp
2553                                        // Result in dst
2554 }
2555 
2556 bool C2_MacroAssembler::in_scratch_emit_size() {
2557   if (ciEnv::current()->task() != nullptr) {
2558     PhaseOutput* phase_output = Compile::current()->output();
2559     if (phase_output != nullptr && phase_output->in_scratch_emit_size()) {
2560       return true;
2561     }
2562   }
2563   return MacroAssembler::in_scratch_emit_size();
2564 }
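
in_scratch_emit_size reports whether the assembler is running inside C2's size-measuring pass: C2 can emit a sequence twice, first into a scratch buffer purely to learn its length, then for real, so emitters with one-time side effects need to know which pass they are in. A generic two-pass sketch of the idea, not the HotSpot API:

#include <cstdio>

struct Emitter {
  bool scratch_pass;            // true while only measuring
  int  size = 0;
  void emit_insn() { size += 4; }
  void install_stub() {
    if (scratch_pass) return;   // skip one-time side effects while measuring
    std::printf("stub installed\n");
  }
};

int main() {
  Emitter measure{true};  measure.emit_insn();  measure.install_stub();
  Emitter real{false};    real.emit_insn();     real.install_stub();
  std::printf("measured %d bytes\n", measure.size);
}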
2565 
2566 void C2_MacroAssembler::load_nklass_compact(Register dst, Register obj, Register index, int scale, int disp) {
2567   // Note: Don't clobber obj anywhere in this method!
2568 
2569   // The incoming address is pointing into obj-start + klass_offset_in_bytes. We need to extract
2570   // obj-start, so that we can load from the object's mark-word instead. Usually the address
2571   // comes as obj-start in obj and klass_offset_in_bytes in disp. However, sometimes C2
2572   // emits code that pre-computes obj-start + klass_offset_in_bytes into a register, and
2573   // then passes that register as obj and 0 in disp. The following code extracts the base
2574   // and offset to load the mark-word.
2575   int offset = oopDesc::mark_offset_in_bytes() + disp - oopDesc::klass_offset_in_bytes();
2576   if (index == noreg) {
2577     ldr(dst, Address(obj, offset));
2578   } else {
2579     lea(dst, Address(obj, index, Address::lsl(scale)));
2580     ldr(dst, Address(dst, offset));
2581   }
2582   lsr(dst, dst, markWord::klass_shift);
2583 }
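
load_nklass_compact supports compact object headers, where the narrow klass id lives in the upper bits of the mark word: the code rebases the incoming klass-field address back to obj-start, loads the mark word, and shifts right by markWord::klass_shift. A sketch of the extraction; the shift value is an assumption for illustration, not the authoritative layout:

#include <cstdint>
#include <cstdio>

constexpr int kKlassShift = 43;                 // stand-in for markWord::klass_shift

uint32_t load_nklass(uint64_t mark_word) {
  return (uint32_t)(mark_word >> kKlassShift);  // lsr(dst, dst, klass_shift)
}

int main() {
  uint64_t mark = ((uint64_t)0x1234u << kKlassShift) | 0x1; // klass id + lock bits
  std::printf("nklass = 0x%x\n", load_nklass(mark));        // prints 0x1234
}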