src/hotspot/cpu/s390/macroAssembler_s390.cpp

3243     // To do so, compare the value in the markWord (currentHeader) with the stack pointer.
3244     z_sgr(currentHeader, Z_SP);
3245     load_const_optimized(temp, (~(os::vm_page_size() - 1) | markWord::lock_mask_in_place));
3246 
3247     z_ngr(currentHeader, temp);
3248 
3249     // result zero: owner is self -> recursive lock. Indicate that by storing 0 in the box.
3250     // result not-zero: attempt failed. We don't hold the lock -> go for slow case.
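    // Worked example (illustrative; assumes os::vm_page_size() == 4096):
    //   temp = ~(4096 - 1) | markWord::lock_mask_in_place = 0xfffffffffffff000 | 0x3.
    //   Z_SP is 8-byte aligned, so the low bits of the difference are just the
    //   markWord's lock bits; they must be 00 (stack-locked). All higher bits
    //   are zero only if 0 <= markWord - Z_SP < 4096, i.e. the BasicLock lives
    //   on the current thread's stack, within one page of Z_SP.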
3251 
3252     z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
3253 
3254     z_bru(done);
3255   } else {
3256     assert(LockingMode == LM_LIGHTWEIGHT, "must be");
3257     lightweight_lock(oop, displacedHeader, temp, done);
3258     z_bru(done);
3259   }
3260 
3261   bind(object_has_monitor);
3262 
3263   if (!UseObjectMonitorTable) {
3264     Register zero = temp;
3265     Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
3266     // The object's monitor m is unlocked iff m->owner is null,
3267     // otherwise m->owner may contain a thread or a stack address.
3268 
3269     // Try to CAS m->owner from null to current thread.
3270     // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3271     // Otherwise, register zero is filled with the current owner.
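    // In C-like pseudocode, the csg below atomically performs:
    //   if (m->owner == 0) { m->owner = Z_thread; CC = EQ; }
    //   else               { zero = m->owner;     CC = NE; }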
3272     z_lghi(zero, 0);
3273     z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3274     if (LockingMode != LM_LIGHTWEIGHT) {
3275       // Store a non-null value into the box.
3276       z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3277     }
3278 
3279     z_bre(done); // acquired the lock for the first time.
3280 
3281     BLOCK_COMMENT("fast_path_recursive_lock {");
3282     // Check if we are already the owner (recursive lock)
3283     z_cgr(Z_thread, zero); // owner is stored in zero by "z_csg" above
3284     z_brne(done); // not a recursive lock
3285 
3286     // Current thread already owns the lock. Just increment recursion count.
3287     z_agsi(Address(monitor_tagged, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 1ll);
3288     z_cgr(zero, zero); // set the CC to EQUAL
3289     BLOCK_COMMENT("} fast_path_recursive_lock");
3290   } else {
3291     // OMCache lookup not supported yet. Take the slowpath.
3292     // Set flag to NE: oop is non-null, so load-and-test yields CC != EQ.
3293     z_ltgr(oop, oop);
3294     z_bru(done);
3295   }
3296   bind(done);
3297 
3298   BLOCK_COMMENT("} compiler_fast_lock_object");
3299   // If locking was successful, CR should indicate 'EQ'.
3300   // The compiler or the native wrapper generates a branch to the runtime call
3301   // _complete_monitor_locking_Java.
3302 }
3303 
3304 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2) {
3305   Register displacedHeader = temp1;
3306   Register currentHeader = temp2;
3307   Register temp = temp1;
3308 
3309   const int hdr_offset = oopDesc::mark_offset_in_bytes();
3310 
3311   assert_different_registers(temp1, temp2, oop, box);
3312 
3313   Label done, object_has_monitor, not_recursive;
3314 
3315   BLOCK_COMMENT("compiler_fast_unlock_object {");

3336   } else if (LockingMode == LM_LEGACY) {
3337     // Check if it is still a lightweight lock; this is true if we see
3338     // the stack address of the basicLock in the markWord of the object.
3339     // Copy box to currentHeader so that csg does not kill it.
3340     z_lgr(currentHeader, box);
3341     z_csg(currentHeader, displacedHeader, hdr_offset, oop);
3342     z_bru(done); // csg sets CR as desired.
3343   } else {
3344     assert(LockingMode == LM_LIGHTWEIGHT, "must be");
3345 
3346     lightweight_unlock(oop, currentHeader, displacedHeader, done);
3347     z_bru(done);
3348   }
3349 
3350   // In case of LM_LIGHTWEIGHT, we may reach here with (temp & ObjectMonitor::ANONYMOUS_OWNER) != 0.
3351   // This is handled like an owner-thread mismatch: we take the slow path.
3352 
3353   // Handle existing monitor.
3354   bind(object_has_monitor);
3355 
3356   if (!UseObjectMonitorTable) {
3357     z_cg(Z_thread, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3358     z_brne(done);
3359 
3360     BLOCK_COMMENT("fast_path_recursive_unlock {");
3361     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3362     z_bre(not_recursive); // if recursions == 0, it is not a recursive lock
3363 
3364     // Recursive inflated unlock
3365     z_agsi(Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), -1ll);
3366     z_cgr(currentHeader, currentHeader); // set the CC to EQUAL
3367     BLOCK_COMMENT("} fast_path_recursive_unlock");
3368     z_bru(done);
3369 
3370     bind(not_recursive);
3371 
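    // What follows is, in illustrative pseudocode:
    //   if (m->EntryList == null && m->cxq == null) {
    //     release_store(&m->owner, null); // nobody to wake up: fast exit, CC == EQ
    //   } else {
    //     // CC == NE: a successor may exist; let the runtime do the hand-off.
    //   }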
3372     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3373     z_brne(done);
3374     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3375     z_brne(done);
3376     z_release();
3377     z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3378   } else {
3379     // OMCache lookup not supported yet. Take the slowpath.
3380     // Set flag to NE: oop is non-null, so load-and-test yields CC != EQ.
3381     z_ltgr(oop, oop);
3382     z_bru(done);
3383   }
3384 
3385   bind(done);
3386 
3387   BLOCK_COMMENT("} compiler_fast_unlock_object");
3388   // flag == EQ indicates success
3389   // flag == NE indicates failure
3390 }
3391 
3392 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3393   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3394   bs->resolve_jobject(this, value, tmp1, tmp2);
3395 }
3396 
3397 // Last_Java_sp must comply with the rules in frame_s390.hpp.
3398 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3399   BLOCK_COMMENT("set_last_Java_frame {");
3400 
3401   // Always set last_Java_pc and flags first because once last_Java_sp
3402   // is visible has_last_Java_frame is true and users will look at the
3403   // rest of the fields. (Note: flags should always be zero before we

3522       // will provoke OS null exception if reg is null.
3523   }
3524 }
3525 
3526 //-------------------------------------
3527 //  Compressed Klass Pointers
3528 //-------------------------------------
3529 
3530 // Klass oop manipulations if compressed.
3531 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3532   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3533   address  base    = CompressedKlassPointers::base();
3534   int      shift   = CompressedKlassPointers::shift();
3535   bool     need_zero_extend = base != nullptr;
3536   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3537 
3538   BLOCK_COMMENT("cKlass encoder {");
3539 
3540 #ifdef ASSERT
3541   Label ok;
3542   z_tmll(current, CompressedKlassPointers::klass_alignment_in_bytes() - 1); // Check alignment.
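  // z_tmll tests the low 16 bits of the register under the given mask;
  // bcondAllZero is taken iff all tested alignment bits are zero.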
3543   z_brc(Assembler::bcondAllZero, ok);
3544   // The plain disassembler does not recognize illtrap. It instead displays
3545   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3546   // the proper beginning of the next instruction.
3547   z_illtrap(0xee);
3548   z_illtrap(0xee);
3549   bind(ok);
3550 #endif
3551 
3552   // Scale down the incoming klass pointer first.
3553   // We can then be sure that the calculated offset fits into 32 bits.
3554   // More generally speaking: all subsequent calculations are purely 32-bit.
3555   if (shift != 0) {
3556     z_srlg(dst, current, shift);
3557     current = dst;
3558   }
3559 
3560   if (base != nullptr) {
3561     // Use scaled-down base address parts to match scaled-down klass pointer.
3562     unsigned int base_h = ((unsigned long)base)>>(32+shift);
3563     unsigned int base_l = (unsigned int)(((unsigned long)base)>>shift);
3564 
3565     // General considerations:
3566     //  - when calculating (current_h - base_h), all digits must cancel (become 0).
3567     //    Otherwise, we would end up with a compressed klass pointer which doesn't
3568     //    fit into 32 bits.
3569     //  - Only bit#33 of the difference could potentially be non-zero. For that
3570     //    to happen, (current_l < base_l) must hold. In this case, the subtraction
3571     //    will create a borrow out of bit#32, nicely killing bit#33.
3572     //  - With the above, we only need to consider current_l and base_l to
3573     //    calculate the result.
3574     //  - Both values are treated as unsigned. The unsigned subtraction is
3575     //    replaced by adding (unsigned) the 2's complement of the subtrahend.
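    //  - Worked example (illustrative, shift == 3): base = 0x240000000, so
    //    base_h = 0 and base_l = (unsigned int)(base >> 3) = 0x48000000.
    //    For a klass at 0x270000000: current_l = 0x4E000000, and
    //    current_l + (~base_l + 1) = 0x4E000000 + 0xB8000000 = 0x106000000;
    //    dropping the carry leaves 0x06000000 == (klass - base) >> 3.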

3665   BLOCK_COMMENT("cKlass decoder (const size) {");
3666 
3667   if (shift != 0) { // Shift required?
3668     z_sllg(dst, dst, shift);
3669   }
3670   if (base != nullptr) {
3671     unsigned int base_h = ((unsigned long)base)>>32;
3672     unsigned int base_l = (unsigned int)((unsigned long)base);
3673     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3674       z_aih(dst, base_h);     // Base has no set bits in lower half.
3675     } else if ((base_h == 0) && (base_l != 0)) {
3676       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3677     } else {
3678       load_const(Z_R0, base); // Base has set bits everywhere.
3679       z_algr(dst, Z_R0);
3680     }
3681   }
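  // Illustration of the three cases above: base == 0x800000000 gives base_h == 8,
  // base_l == 0, so a single z_aih on the high word suffices; base == 0x80000000
  // gives base_h == 0, base_l == 0x80000000, so one 32-bit unsigned add (z_algfi)
  // does the job; any other mix needs the full 64-bit constant plus z_algr.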
3682 
3683 #ifdef ASSERT
3684   Label ok;
3685   z_tmll(dst, CompressedKlassPointers::klass_alignment_in_bytes() - 1); // Check alignment.
3686   z_brc(Assembler::bcondAllZero, ok);
3687   // The plain disassembler does not recognize illtrap. It instead displays
3688   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3689   // the proper beginning of the next instruction.
3690   z_illtrap(0xd1);
3691   z_illtrap(0xd1);
3692   bind(ok);
3693 #endif
3694   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
3695 
3696   BLOCK_COMMENT("} cKlass decoder (const size)");
3697 }
3698 
3699 // This variant of decode_klass_not_null() is for cases where
3700 //  1) the size of the generated instructions may vary
3701 //  2) the result is (potentially) stored in a register different from the source.
3702 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3703   address base  = CompressedKlassPointers::base();
3704   int     shift = CompressedKlassPointers::shift();
3705   assert(UseCompressedClassPointers, "only for compressed klass ptrs");

3712     z_sllg(dst, src, shift);
3713   } else {
3714     lgr_if_needed(dst, src);
3715   }
3716 
3717   if (base != nullptr) {
3718     unsigned int base_h = ((unsigned long)base)>>32;
3719     unsigned int base_l = (unsigned int)((unsigned long)base);
3720     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3721       z_aih(dst, base_h);     // Base has no set bits in lower half.
3722     } else if ((base_h == 0) && (base_l != 0)) {
3723       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3724     } else {
3725       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
3726       z_algr(dst, Z_R0);
3727     }
3728   }
3729 
3730 #ifdef ASSERT
3731   Label ok;
3732   z_tmll(dst, CompressedKlassPointers::klass_alignment_in_bytes() - 1); // Check alignment.
3733   z_brc(Assembler::bcondAllZero, ok);
3734   // The plain disassembler does not recognize illtrap. It instead displays
3735   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3736   // the proper beginning of the next instruction.
3737   z_illtrap(0xd2);
3738   z_illtrap(0xd2);
3739   bind(ok);
3740 #endif
3741   BLOCK_COMMENT("} cKlass decoder");
3742 }
3743 
3744 void MacroAssembler::load_klass(Register klass, Address mem) {
3745   if (UseCompressedClassPointers) {
3746     z_llgf(klass, mem);
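    // z_llgf zero-extends the 32-bit narrow klass to 64 bits (it is unsigned).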
3747     // Attention: no null check here!
3748     decode_klass_not_null(klass);
3749   } else {
3750     z_lg(klass, mem);
3751   }
3752 }
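
In total, the compressed-klass load above amounts to simple arithmetic. A minimal
standalone C++ sketch (the function name and types are illustrative, not part of
this file):

  #include <cstdint>

  // Models what z_llgf + decode_klass_not_null compute, ignoring the assertions.
  static inline uintptr_t decode_narrow_klass(uint32_t narrow_klass,
                                              uintptr_t base, int shift) {
    return base + ((uintptr_t)narrow_klass << shift); // zero-extend, scale, rebase
  }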

3781     } else {
3782       z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
3783     }
3784   }
3785 }
3786 
3787 // Compare klass ptr in memory against klass ptr in register.
3788 //
3789 // Rop1            - klass in register, always uncompressed.
3790 // disp            - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
3791 // Rbase           - Base address of cKlass in memory.
3792 // maybenull       - True if Rop1 may be null.
3793 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybenull) {
3794 
3795   BLOCK_COMMENT("compare klass ptr {");
3796 
3797   if (UseCompressedClassPointers) {
3798     const int shift = CompressedKlassPointers::shift();
3799     address   base  = CompressedKlassPointers::base();
3800 
3801     if (CompressedKlassPointers::tiny_classpointer_mode()) {
3802       assert(shift >= 3, "cKlass encoder detected bad shift");
3803     } else {
3804       assert((shift == 0) || (shift == 3), "cKlass encoder detected bad shift");
3805     }
3806     assert_different_registers(Rop1, Z_R0);
3807     assert_different_registers(Rop1, Rbase, Z_R1);
3808 
3809     // First encode the register klass and then compare with the cKlass in memory.
3810     // This sequence saves an unnecessary cKlass load and decode.
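    // I.e. rather than load the cKlass, decode it, and compare 64 bits, we encode
    // Rop1 into a scratch register and compare 32 bits (z_cl) against memory.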
3811     if (base == nullptr) {
3812       if (shift == 0) {
3813         z_cl(Rop1, disp, Rbase);     // Unscaled
3814       } else {
3815         z_srlg(Z_R0, Rop1, shift);   // ZeroBased
3816         z_cl(Z_R0, disp, Rbase);
3817       }
3818     } else {                         // HeapBased
3819 #ifdef ASSERT
3820       bool     used_R0 = true;
3821       bool     used_R1 = true;
3822 #endif
3823       Register current = Rop1;
3824       Label    done;
3825 