< prev index next >

src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

Print this page

   1 /*
   2  * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include <sys/types.h>
  27 
  28 #include "precompiled.hpp"
  29 #include "asm/assembler.hpp"
  30 #include "asm/assembler.inline.hpp"
  31 #include "ci/ciEnv.hpp"
  32 #include "compiler/compileTask.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/barrierSetAssembler.hpp"
  37 #include "gc/shared/cardTableBarrierSet.hpp"
  38 #include "gc/shared/cardTable.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "gc/shared/tlab_globals.hpp"
  41 #include "interpreter/bytecodeHistogram.hpp"
  42 #include "interpreter/interpreter.hpp"
  43 #include "jvm.h"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "nativeInst_aarch64.hpp"
  47 #include "oops/accessDecorators.hpp"
  48 #include "oops/compressedOops.inline.hpp"
  49 #include "oops/klass.inline.hpp"
  50 #include "runtime/continuation.hpp"
  51 #include "runtime/icache.hpp"
  52 #include "runtime/interfaceSupport.inline.hpp"
  53 #include "runtime/javaThread.hpp"
  54 #include "runtime/jniHandles.inline.hpp"
  55 #include "runtime/sharedRuntime.hpp"
  56 #include "runtime/stubRoutines.hpp"

  57 #include "utilities/powerOfTwo.hpp"
  58 #ifdef COMPILER1
  59 #include "c1/c1_LIRAssembler.hpp"
  60 #endif
  61 #ifdef COMPILER2
  62 #include "oops/oop.hpp"
  63 #include "opto/compile.hpp"
  64 #include "opto/node.hpp"
  65 #include "opto/output.hpp"
  66 #endif
  67 


  68 #ifdef PRODUCT
  69 #define BLOCK_COMMENT(str) /* nothing */
  70 #else
  71 #define BLOCK_COMMENT(str) block_comment(str)
  72 #endif
  73 #define STOP(str) stop(str);
  74 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  75 
  76 #ifdef ASSERT
  77 extern "C" void disnm(intptr_t p);
  78 #endif
  79 // Target-dependent relocation processing
  80 //
  81 // Instruction sequences whose target may need to be retrieved or
  82 // patched are distinguished by their leading instruction, sorting
  83 // them into three main instruction groups and related subgroups.
  84 //
  85 // 1) Branch, Exception and System (insn count = 1)
  86 //    1a) Unconditional branch (immediate):
  87 //      b/bl imm19

4413   adrp(rscratch1, src2, offset);
4414   ldr(rscratch1, Address(rscratch1, offset));
4415   cmp(src1, rscratch1);
4416 }
4417 
// Compare two object references; sets the condition flags with a
// full-width register compare for a subsequent conditional branch.
4418 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
4419   cmp(obj1, obj2);
4420 }
4421 
// Loads the ClassLoaderData* of rmethod's holder (InstanceKlass) into rresult.
4422 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
4423   load_method_holder(rresult, rmethod);
4424   ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
4425 }
4426 
// Loads the InstanceKlass* holding 'method' into 'holder' by chasing
// Method* -> ConstMethod* -> ConstantPool* -> pool holder. 'holder' is
// used as the scratch register for each step of the chain.
4427 void MacroAssembler::load_method_holder(Register holder, Register method) {
4428   ldr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
4429   ldr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
4430   ldr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
4431 }
4432 



















// Loads the Klass* of src's object header into dst. With compressed class
// pointers the narrow klass word is loaded and decoded in place.
4433 void MacroAssembler::load_klass(Register dst, Register src) {
4434   if (UseCompressedClassPointers) {
4435     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4436     decode_klass_not_null(dst);
4437   } else {
4438     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4439   }
4440 }
4441 
4442 // ((OopHandle)result).resolve();
4443 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
4444   // OopHandle::resolve is an indirection.
4445   // tmp1/tmp2 are spare registers handed through to the GC access barrier.
4446   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
4447 }
4447 
4448 // ((WeakHandle)result).resolve();
4449 // Resolves in place: 'result' holds the WeakHandle on entry and the
4450 // resolved oop (or null) on exit.
4451 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
4452   assert_different_registers(result, tmp1, tmp2);
4453   Label resolved;
4454 
4455   // A null weak handle resolves to null.
4456   cbz(result, resolved);
4457 
4458   // Only 64 bit platforms support GCs that require a tmp register
4459   // WeakHandle::resolve is an indirection like jweak.
4460   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
4461                  result, Address(result), tmp1, tmp2);
4462   bind(resolved);
4463 }
4462 
// Loads the java mirror (java.lang.Class oop) of the holder class of the
// current method into dst, resolving the OopHandle that contains it.
// NOTE(review): the 'method' parameter is unused; the chain starts from the
// fixed register rmethod — confirm all callers have the method in rmethod.
4463 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
4464   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
4465   ldr(dst, Address(rmethod, Method::const_offset()));
4466   ldr(dst, Address(dst, ConstMethod::constants_offset()));
4467   ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
4468   ldr(dst, Address(dst, mirror_offset));
4469   resolve_oop_handle(dst, tmp1, tmp2);
4470 }
4471 
// Compares trial_klass against the klass of object 'oop', setting the
// condition flags (EQ on match). With compressed class pointers the
// comparison is done in narrow/shifted form when the encoding permits,
// avoiding a full decode of the loaded narrow klass.
4472 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
4473   if (UseCompressedClassPointers) {
4474     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4475     if (CompressedKlassPointers::base() == nullptr) {
       // Zero base: decoding is just a shift, so compare trial against tmp << shift.
4476       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
4477       return;
4478     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
4479                && CompressedKlassPointers::shift() == 0) {
4480       // Only the bottom 32 bits matter
4481       cmpw(trial_klass, tmp);
4482       return;
4483     }
       // General case: decode the narrow klass before comparing.
4484     decode_klass_not_null(tmp);
4485   } else {
4486     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4487   }
4488   cmp(trial_klass, tmp);
4489 }
4490 
















// Stores klass 'src' into the header of object 'dst'. Note that with
// compressed class pointers 'src' is clobbered (encoded in place).
4491 void MacroAssembler::store_klass(Register dst, Register src) {
4492   // FIXME: Should this be a store release?  concurrent gcs assumes
4493   // klass length is valid if klass field is not null.
4494   if (UseCompressedClassPointers) {
4495     encode_klass_not_null(src);
4496     strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4497   } else {
4498     str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4499   }
4500 }
4501 
// Stores 'src' into the 32-bit klass-gap field of object 'dst'.
// The gap only exists when class pointers are compressed; otherwise no-op.
4502 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4503   if (UseCompressedClassPointers) {
4504     // Store to klass gap in destination
4505     strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
4506   }
4507 }
4508 
4509 // Algorithm must match CompressedOops::encode.
4510 void MacroAssembler::encode_heap_oop(Register d, Register s) {
4511 #ifdef ASSERT
4512   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
4513 #endif
4514   verify_oop_msg(s, "broken oop in encode_heap_oop");
4515   if (CompressedOops::base() == nullptr) {
4516     if (CompressedOops::shift() != 0) {
4517       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4518       lsr(d, s, LogMinObjAlignmentInBytes);
4519     } else {
4520       mov(d, s);
4521     }
4522   } else {

6305 
6306 
6307 // A double move
6308 // Moves a double value between VMReg locations for calling-convention
6309 // shuffling: stack->stack goes through the integer scratch 'tmp'
6310 // (in-slots read off rfp, out-slots written off sp); stack->fpr loads
6311 // directly; fpr->fpr uses fmovd; fpr->stack stores directly.
6312 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
6313  if (src.first()->is_stack()) {
6314     if (dst.first()->is_stack()) {
6315       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
6316       str(tmp, Address(sp, reg2offset_out(dst.first())));
6317     } else {
6318       ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
6319     }
6320   } else if (src.first() != dst.first()) {
6321     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
6322       fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
6323     else
6324       strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
6325   }
6326 }
6323 
6324 // Implements lightweight-locking.
6325 // Branches to slow upon failure to lock the object, with ZF cleared.
6326 // Falls through upon success with ZF set.
6327 //
6328 //  - obj: the object to be locked
6329 //  - hdr: the header, already loaded from obj, will be destroyed
6330 //  - t1, t2: temporary registers, will be destroyed
6331 void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
6332   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
6333   assert_different_registers(obj, hdr, t1, t2, rscratch1);
6334 
6335   // Check if we would have space on lock-stack for the object.
     // t1 = byte offset of the lock-stack top within the current thread.
6336   ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6337   cmpw(t1, (unsigned)LockStack::end_offset() - 1);
6338   br(Assembler::GT, slow);
6339 
6340   // Load (object->mark() | 1) into hdr
6341   orr(hdr, hdr, markWord::unlocked_value);
6342   // Clear lock-bits, into t2
6343   eor(t2, hdr, markWord::unlocked_value);
6344   // Try to swing header from unlocked to locked
6345   // Clobbers rscratch1 when UseLSE is false
6346   cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
6347           /*acquire*/ true, /*release*/ true, /*weak*/ false, t1);
6348   br(Assembler::NE, slow);
6349 
6350   // After successful lock, push object on lock-stack
     // (t1 is reloaded here since it was handed to cmpxchg above).
6351   ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6352   str(obj, Address(rthread, t1));
6353   addw(t1, t1, oopSize);
6354   strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6355 }
6356 
6357 // Implements lightweight-unlocking.
6358 // Branches to slow upon failure, with ZF cleared.
6359 // Falls through upon success, with ZF set.
6360 //
6361 // - obj: the object to be unlocked
6362 // - hdr: the (pre-loaded) header of the object
6363 // - t1, t2: temporary registers
6364 void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
6365   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
6366   assert_different_registers(obj, hdr, t1, t2, rscratch1);
6367 
6368 #ifdef ASSERT
6369   {
6370     // The following checks rely on the fact that LockStack is only ever modified by
6371     // its owning thread, even if the lock got inflated concurrently; removal of LockStack
6372     // entries after inflation will happen delayed in that case.
6373 
6374     // Check for lock-stack underflow.
6375     Label stack_ok;
6376     ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6377     cmpw(t1, (unsigned)LockStack::start_offset());
6378     br(Assembler::GT, stack_ok);
6379     STOP("Lock-stack underflow");
6380     bind(stack_ok);
6381   }
6382   {
6383     // Check if the top of the lock-stack matches the unlocked object.
       // (t1 still holds the lock-stack top offset from the check above.)
6384     Label tos_ok;
6385     subw(t1, t1, oopSize);
6386     ldr(t1, Address(rthread, t1));
6387     cmpoop(t1, obj);
6388     br(Assembler::EQ, tos_ok);
6389     STOP("Top of lock-stack does not match the unlocked object");
6390     bind(tos_ok);
6391   }
6392   {
6393     // Check that hdr is fast-locked.
6394     Label hdr_ok;
6395     tst(hdr, markWord::lock_mask_in_place);
6396     br(Assembler::EQ, hdr_ok);
6397     STOP("Header is not fast-locked");
6398     bind(hdr_ok);
6399   }
6400 #endif
6401 
6402   // Load the new header (unlocked) into t1
6403   orr(t1, hdr, markWord::unlocked_value);
6404 
6405   // Try to swing header from locked to unlocked
6406   // Clobbers rscratch1 when UseLSE is false
6407   cmpxchg(obj, hdr, t1, Assembler::xword,
6408           /*acquire*/ true, /*release*/ true, /*weak*/ false, t2);
6409   br(Assembler::NE, slow);
6410 
6411   // After successful unlock, pop object from lock-stack
6412   ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6413   subw(t1, t1, oopSize);
6414 #ifdef ASSERT
     // Debug builds: zap the popped slot so stale entries are detectable.
6415   str(zr, Address(rthread, t1));
6416 #endif
6417   strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6418 }

   1 /*
   2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 


  26 #include "precompiled.hpp"
  27 #include "asm/assembler.hpp"
  28 #include "asm/assembler.inline.hpp"
  29 #include "ci/ciEnv.hpp"
  30 #include "compiler/compileTask.hpp"
  31 #include "compiler/disassembler.hpp"
  32 #include "compiler/oopMap.hpp"
  33 #include "gc/shared/barrierSet.hpp"
  34 #include "gc/shared/barrierSetAssembler.hpp"
  35 #include "gc/shared/cardTableBarrierSet.hpp"
  36 #include "gc/shared/cardTable.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 #include "interpreter/bytecodeHistogram.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "jvm.h"
  42 #include "memory/resourceArea.hpp"
  43 #include "memory/universe.hpp"
  44 #include "nativeInst_aarch64.hpp"
  45 #include "oops/accessDecorators.hpp"
  46 #include "oops/compressedOops.inline.hpp"
  47 #include "oops/klass.inline.hpp"
  48 #include "runtime/continuation.hpp"
  49 #include "runtime/icache.hpp"
  50 #include "runtime/interfaceSupport.inline.hpp"
  51 #include "runtime/javaThread.hpp"
  52 #include "runtime/jniHandles.inline.hpp"
  53 #include "runtime/sharedRuntime.hpp"
  54 #include "runtime/stubRoutines.hpp"
  55 #include "utilities/globalDefinitions.hpp"
  56 #include "utilities/powerOfTwo.hpp"
  57 #ifdef COMPILER1
  58 #include "c1/c1_LIRAssembler.hpp"
  59 #endif
  60 #ifdef COMPILER2
  61 #include "oops/oop.hpp"
  62 #include "opto/compile.hpp"
  63 #include "opto/node.hpp"
  64 #include "opto/output.hpp"
  65 #endif
  66 
  67 #include <sys/types.h>
  68 
  69 #ifdef PRODUCT
  70 #define BLOCK_COMMENT(str) /* nothing */
  71 #else
  72 #define BLOCK_COMMENT(str) block_comment(str)
  73 #endif
  74 #define STOP(str) stop(str);
  75 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  76 
  77 #ifdef ASSERT
  78 extern "C" void disnm(intptr_t p);
  79 #endif
  80 // Target-dependent relocation processing
  81 //
  82 // Instruction sequences whose target may need to be retrieved or
  83 // patched are distinguished by their leading instruction, sorting
  84 // them into three main instruction groups and related subgroups.
  85 //
  86 // 1) Branch, Exception and System (insn count = 1)
  87 //    1a) Unconditional branch (immediate):
  88 //      b/bl imm19

4414   adrp(rscratch1, src2, offset);
4415   ldr(rscratch1, Address(rscratch1, offset));
4416   cmp(src1, rscratch1);
4417 }
4418 
// Compare two object references; sets the condition flags with a
// full-width register compare for a subsequent conditional branch.
4419 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
4420   cmp(obj1, obj2);
4421 }
4422 
// Loads the ClassLoaderData* of rmethod's holder (InstanceKlass) into rresult.
4423 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
4424   load_method_holder(rresult, rmethod);
4425   ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
4426 }
4427 
// Loads the InstanceKlass* holding 'method' into 'holder' by chasing
// Method* -> ConstMethod* -> ConstantPool* -> pool holder. 'holder' is
// used as the scratch register for each step of the chain.
4428 void MacroAssembler::load_method_holder(Register holder, Register method) {
4429   ldr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
4430   ldr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
4431   ldr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
4432 }
4433 
4434 // Loads the obj's narrow Klass (narrowKlass) into dst — undecoded; callers
4435 // that need the full Klass* must decode afterwards (see load_klass).
4436 // Preserves all registers (incl src, rscratch1 and rscratch2).
4437 void MacroAssembler::load_nklass_compact(Register dst, Register src) {
4438   assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders");
4439 
4440   Label fast;
4441 
4442   // Check if we can take the (common) fast path, if obj is unlocked.
4443   ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
4444   tbz(dst, exact_log2(markWord::monitor_value), fast);
4445 
4446   // Fetch displaced header
     // (monitor bit set: the mark word holds a tagged ObjectMonitor*, and the
     // real mark — with the klass bits — lives in the monitor's header field).
4447   ldr(dst, Address(dst, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
4448 
4449   // Fast-path: shift to get narrowKlass.
4450   bind(fast);
4451   lsr(dst, dst, markWord::klass_shift);
4452 }
4452 
// Loads the Klass* of src's object header into dst. With compact object
// headers the narrow klass comes from the mark word; with plain compressed
// class pointers it comes from the klass field; either way it is decoded
// in place. Uncompressed reads the full Klass* directly.
4453 void MacroAssembler::load_klass(Register dst, Register src) {
4454   if (UseCompactObjectHeaders) {
4455     load_nklass_compact(dst, src);
4456     decode_klass_not_null(dst);
4457   } else if (UseCompressedClassPointers) {
4458     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4459     decode_klass_not_null(dst);
4460   } else {
4461     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4462   }
4463 }
4464 
4465 // ((OopHandle)result).resolve();
4466 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
4467   // OopHandle::resolve is an indirection.
4468   // tmp1/tmp2 are spare registers handed through to the GC access barrier.
4469   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
4470 }
4470 
4471 // ((WeakHandle)result).resolve();
4472 // Resolves in place: 'result' holds the WeakHandle on entry and the
4473 // resolved oop (or null) on exit.
4474 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
4475   assert_different_registers(result, tmp1, tmp2);
4476   Label resolved;
4477 
4478   // A null weak handle resolves to null.
4479   cbz(result, resolved);
4480 
4481   // Only 64 bit platforms support GCs that require a tmp register
4482   // WeakHandle::resolve is an indirection like jweak.
4483   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
4484                  result, Address(result), tmp1, tmp2);
4485   bind(resolved);
4486 }
4485 
// Loads the java mirror (java.lang.Class oop) of the holder class of the
// current method into dst, resolving the OopHandle that contains it.
// NOTE(review): the 'method' parameter is unused; the chain starts from the
// fixed register rmethod — confirm all callers have the method in rmethod.
4486 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
4487   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
4488   ldr(dst, Address(rmethod, Method::const_offset()));
4489   ldr(dst, Address(dst, ConstMethod::constants_offset()));
4490   ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
4491   ldr(dst, Address(dst, mirror_offset));
4492   resolve_oop_handle(dst, tmp1, tmp2);
4493 }
4494 
// Compares trial_klass against the klass of object 'oop', setting the
// condition flags (EQ on match). The narrow klass is loaded from the mark
// word (compact headers) or the klass field, and the comparison is done in
// narrow/shifted form when the encoding permits, avoiding a full decode.
4495 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
4496   assert_different_registers(oop, trial_klass, tmp);
4497   if (UseCompressedClassPointers) {
4498     if (UseCompactObjectHeaders) {
4499       load_nklass_compact(tmp, oop);
4500     } else {
4501       ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4502     }
4503     if (CompressedKlassPointers::base() == nullptr) {
       // Zero base: decoding is just a shift, so compare trial against tmp << shift.
4504       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
4505       return;
4506     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
4507                && CompressedKlassPointers::shift() == 0) {
4508       // Only the bottom 32 bits matter
4509       cmpw(trial_klass, tmp);
4510       return;
4511     }
       // General case: decode the narrow klass before comparing.
4512     decode_klass_not_null(tmp);
4513   } else {
4514     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4515   }
4516   cmp(trial_klass, tmp);
4517 }
4518 
// Compares the klasses of the two objects 'src' and 'dst', setting the
// condition flags (EQ if both objects have the same klass). Narrow klass
// words can be compared directly without decoding, since equal narrow
// values imply equal Klass*.
4519 void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) {
4520   if (UseCompactObjectHeaders) {
4521     load_nklass_compact(tmp1, src);
4522     load_nklass_compact(tmp2, dst);
4523     cmpw(tmp1, tmp2);
4524   } else if (UseCompressedClassPointers) {
4525     ldrw(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
4526     ldrw(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
4527     cmpw(tmp1, tmp2);
4528   } else {
4529     ldr(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
4530     ldr(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
4531     cmp(tmp1, tmp2);
4532   }
4533 }
4534 
// Stores klass 'src' into the header of object 'dst'. Note that with
// compressed class pointers 'src' is clobbered (encoded in place).
// Not usable with compact headers, where the klass lives in the mark word.
4535 void MacroAssembler::store_klass(Register dst, Register src) {
4536   // FIXME: Should this be a store release?  concurrent gcs assumes
4537   // klass length is valid if klass field is not null.
4538   assert(!UseCompactObjectHeaders, "not with compact headers");
4539   if (UseCompressedClassPointers) {
4540     encode_klass_not_null(src);
4541     strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4542   } else {
4543     str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4544   }
4545 }
4546 
// Stores 'src' into the 32-bit klass-gap field of object 'dst'. The gap
// only exists with (non-compact) compressed class pointers; otherwise no-op.
4547 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4548   assert(!UseCompactObjectHeaders, "not with compact headers");
4549   if (UseCompressedClassPointers) {
4550     // Store to klass gap in destination
4551     strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
4552   }
4553 }
4554 
4555 // Algorithm must match CompressedOops::encode.
4556 void MacroAssembler::encode_heap_oop(Register d, Register s) {
4557 #ifdef ASSERT
4558   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
4559 #endif
4560   verify_oop_msg(s, "broken oop in encode_heap_oop");
4561   if (CompressedOops::base() == nullptr) {
4562     if (CompressedOops::shift() != 0) {
4563       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4564       lsr(d, s, LogMinObjAlignmentInBytes);
4565     } else {
4566       mov(d, s);
4567     }
4568   } else {

6351 
6352 
6353 // A double move
6354 // Moves a double value between VMReg locations for calling-convention
6355 // shuffling: stack->stack goes through the integer scratch 'tmp'
6356 // (in-slots read off rfp, out-slots written off sp); stack->fpr loads
6357 // directly; fpr->fpr uses fmovd; fpr->stack stores directly.
6358 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
6359  if (src.first()->is_stack()) {
6360     if (dst.first()->is_stack()) {
6361       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
6362       str(tmp, Address(sp, reg2offset_out(dst.first())));
6363     } else {
6364       ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
6365     }
6366   } else if (src.first() != dst.first()) {
6367     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
6368       fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
6369     else
6370       strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
6371   }
6372 }
6369 
6370 // Implements lightweight-locking.
6371 //
6372 //  - obj: the object to be locked
6373 //  - t1, t2, t3: temporary registers, will be destroyed
6374 //  - slow: branched to if locking fails; absolute offset may be larger than 32KB (imm14 encoding).
6375 void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
6376   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
6377   assert_different_registers(obj, t1, t2, t3, rscratch1);
6378 
6379   Label push;
6380   const Register top = t1;   // byte offset of lock-stack top within the thread
6381   const Register mark = t2;  // the object's mark word
6382   const Register t = t3;     // general scratch
6383 
6384   // Preload the markWord. It is important that this is the first
6385   // instruction emitted as it is part of C1's null check semantics.
6386   ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
6387 
6388   // Check if the lock-stack is full.
6389   ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6390   cmpw(top, (unsigned)LockStack::end_offset());
6391   br(Assembler::GE, slow);
6392 
6393   // Check for recursion.
     // If obj is already on top of the lock-stack, just push it again.
6394   subw(t, top, oopSize);
6395   ldr(t, Address(rthread, t));
6396   cmp(obj, t);
6397   br(Assembler::EQ, push);
6398 
6399   // Check header for monitor (0b10).
6400   tst(mark, markWord::monitor_value);
6401   br(Assembler::NE, slow);
6402 
6403   // Try to lock. Transition lock bits 0b01 => 0b00
6404   assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
6405   orr(mark, mark, markWord::unlocked_value);
6406   eor(t, mark, markWord::unlocked_value);
6407   cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
6408           /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
6409   br(Assembler::NE, slow);
6410 
6411   bind(push);
6412   // After successful lock, push object on lock-stack.
6413   str(obj, Address(rthread, top));
6414   addw(top, top, oopSize);
6415   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6416 }
6417 
6418 // Implements lightweight-unlocking.
6419 //
6420 // - obj: the object to be unlocked
6421 // - t1, t2, t3: temporary registers
6422 // - slow: branched to if unlocking fails; absolute offset may be larger than 32KB (imm14 encoding).
6423 void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
6424   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
6425   // cmpxchg clobbers rscratch1.
6426   assert_different_registers(obj, t1, t2, t3, rscratch1);
6427 
6428 #ifdef ASSERT
6429   {
6430     // Check for lock-stack underflow.
6431     Label stack_ok;
6432     ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6433     cmpw(t1, (unsigned)LockStack::start_offset());
6434     br(Assembler::GE, stack_ok);
6435     STOP("Lock-stack underflow");
6436     bind(stack_ok);
6437   }
6438 #endif
6439 
6440   Label unlocked, push_and_slow;
6441   const Register top = t1;   // byte offset of lock-stack top within the thread
6442   const Register mark = t2;  // the object's mark word
6443   const Register t = t3;     // general scratch
6444 
6445   // Check if obj is top of lock-stack.
6446   ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6447   subw(top, top, oopSize);
6448   ldr(t, Address(rthread, top));
6449   cmp(obj, t);
6450   br(Assembler::NE, slow);
6451 
6452   // Pop lock-stack.
     // Debug builds zap the popped slot so stale entries are detectable.
6453   DEBUG_ONLY(str(zr, Address(rthread, top));)
6454   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6455 
6456   // Check if recursive.
     // If obj is still on the (now shorter) lock-stack top, popping was enough.
6457   subw(t, top, oopSize);
6458   ldr(t, Address(rthread, t));
6459   cmp(obj, t);
6460   br(Assembler::EQ, unlocked);
6461 
6462   // Not recursive. Check header for monitor (0b10).
6463   ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
6464   tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);
6465 
6466 #ifdef ASSERT
6467   // Check header not unlocked (0b01).
6468   Label not_unlocked;
6469   tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
6470   stop("lightweight_unlock already unlocked");
6471   bind(not_unlocked);
6472 #endif
6473 
6474   // Try to unlock. Transition lock bits 0b00 => 0b01
6475   assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
6476   orr(t, mark, markWord::unlocked_value);
6477   cmpxchg(obj, mark, t, Assembler::xword,
6478           /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
6479   br(Assembler::EQ, unlocked);
6480 
6481   bind(push_and_slow);
6482   // Restore lock-stack and handle the unlock in runtime.
6483   DEBUG_ONLY(str(obj, Address(rthread, top));)
6484   addw(top, top, oopSize);
6485   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6486   b(slow);
6487 
6488   bind(unlocked);
6489 }
< prev index next >