/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif

// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
// Compare src1 against the pointer constant located at src2, clobbering
// rscratch1.
void MacroAssembler::cmpptr(Register src1, Address src2) {
  uint64_t offset;
  adrp(rscratch1, src2, offset);
  ldr(rscratch1, Address(rscratch1, offset));
  cmp(src1, rscratch1);
}

void MacroAssembler::cmpoop(Register obj1, Register obj2) {
  cmp(obj1, obj2);
}

void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
  load_method_holder(rresult, rmethod);
  ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
}

void MacroAssembler::load_method_holder(Register holder, Register method) {
  ldr(holder, Address(method, Method::const_offset()));             // ConstMethod*
  ldr(holder, Address(holder, ConstMethod::constants_offset()));    // ConstantPool*
  ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}

// Loads the obj's Klass* into dst.
// Preserves all registers (incl src, rscratch1 and rscratch2).
void MacroAssembler::load_nklass_compact(Register dst, Register src) {
  assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders");

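  // With compact object headers, the narrow Klass* lives in the upper bits
  // of the markWord itself, roughly:
  //
  //   [ narrowKlass (upper bits) | hash/age/etc. | lock bits (lowest 2) ]
  //
  // If the object is inflated (lock bits 0b10), its real markWord has been
  // displaced into the ObjectMonitor, so it is re-fetched from there before
  // the shift below extracts the narrowKlass.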
  Label fast;

  // Check if we can take the (common) fast path, if obj is unlocked.
  ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
  tbz(dst, exact_log2(markWord::monitor_value), fast);

  // Fetch displaced header
  ldr(dst, Address(dst, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));

  // Fast-path: shift to get narrowKlass.
  bind(fast);
  lsr(dst, dst, markWord::klass_shift);
}

void MacroAssembler::load_klass(Register dst, Register src) {
  if (UseCompactObjectHeaders) {
    load_nklass_compact(dst, src);
    decode_klass_not_null(dst);
  } else if (UseCompressedClassPointers) {
    ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst);
  } else {
    ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
}

// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
  // OopHandle::resolve is an indirection.
  access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
}

// ((WeakHandle)result).resolve();
void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
  assert_different_registers(result, tmp1, tmp2);
  Label resolved;

  // A null weak handle resolves to null.
  cbz(result, resolved);

  // Only 64-bit platforms support GCs that require a tmp register.
  // WeakHandle::resolve is an indirection like jweak.
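  // (ON_PHANTOM_OOP_REF marks this as a phantom-strength load: a concurrent
  // collector's load barrier may return null once the referent is no longer
  // strongly reachable, instead of reviving it.)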
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 result, Address(result), tmp1, tmp2);
  bind(resolved);
}

void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(rmethod, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}

void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
  assert_different_registers(oop, trial_klass, tmp);
  if (UseCompressedClassPointers) {
    if (UseCompactObjectHeaders) {
      load_nklass_compact(tmp, oop);
    } else {
      ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
    }
    if (CompressedKlassPointers::base() == nullptr) {
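      // With a null encoding base, decoding tmp is just a left shift, so it
      // can be folded into the shifted-register form of cmp.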
      cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
               && CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(trial_klass, tmp);
      return;
    }
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
  }
  cmp(trial_klass, tmp);
}

void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) {
  if (UseCompactObjectHeaders) {
    load_nklass_compact(tmp1, src);
    load_nklass_compact(tmp2, dst);
    cmpw(tmp1, tmp2);
  } else if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(src, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
    cmp(tmp1, tmp2);
  }
}

void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release? Concurrent GCs assume the
  // klass length is valid if the klass field is not null.
  assert(!UseCompactObjectHeaders, "not with compact headers");
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  } else {
    str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::store_klass_gap(Register dst, Register src) {
  assert(!UseCompactObjectHeaders, "not with compact headers");
  if (UseCompressedClassPointers) {
    // Store to klass gap in destination
    strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
  }
}

// Algorithm must match CompressedOops::encode.
void MacroAssembler::encode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop_msg(s, "broken oop in encode_heap_oop");
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0) {
      assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
      lsr(d, s, LogMinObjAlignmentInBytes);
    } else {
      mov(d, s);
    }
  } else {
    // Encode relative to the heap base: null maps to zero, everything
    // else to (oop - base) >> shift.
    subs(d, s, rheapbase);
    csel(d, d, zr, Assembler::HS);
    lsr(d, d, LogMinObjAlignmentInBytes);
  }
}

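// The cases handled by double_move, by kind of source and destination:
//   stack -> stack : copy the 64-bit slot via the integer tmp register
//   stack -> fpr   : ldrd directly from the caller's frame (rfp-relative)
//   fpr   -> fpr   : fmovd register move
//   fpr   -> stack : strd into the outgoing argument area (sp-relative)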
// A double move: moves a C double between stack slots and/or
// floating-point registers, as described by the VMRegPairs.
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg()) {
      fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    } else {
      strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
    }
  }
}

// Implements lightweight-locking.
//
//  - obj: the object to be locked
//  - t1, t2, t3: temporary registers, will be destroyed
//  - slow: branched to if locking fails; may be further away than the
//          +/-32KB range of an imm14 (tbz/tbnz) branch.
void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(obj, t1, t2, t3, rscratch1);

  Label push;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));

  // Check if the lock-stack is full.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  cmpw(top, (unsigned)LockStack::end_offset());
  br(Assembler::GE, slow);
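  // (lock_stack_top is a byte offset into the thread-local lock-stack, so
  // the capacity check above and the pushes below all work in bytes.)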

  // Check for recursion.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, push);
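  // (A recursive lightweight lock is represented by the same oop occupying
  // consecutive lock-stack slots, so on a match we just push obj again.)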

  // Check header for monitor (0b10).
  tst(mark, markWord::monitor_value);
  br(Assembler::NE, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(mark, mark, markWord::unlocked_value);
  eor(t, mark, markWord::unlocked_value);
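  // At this point mark has its lock bits forced to 0b01 (the expected,
  // unlocked value) and t is the same word with lock bits 0b00 (locked).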
  cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
          /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
  br(Assembler::NE, slow);

  bind(push);
  // After successful lock, push object on lock-stack.
  str(obj, Address(rthread, top));
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}

// Implements lightweight-unlocking.
//
//  - obj: the object to be unlocked
//  - t1, t2, t3: temporary registers
//  - slow: branched to if unlocking fails; may be further away than the
//          +/-32KB range of an imm14 (tbz/tbnz) branch.
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  // cmpxchg clobbers rscratch1.
  assert_different_registers(obj, t1, t2, t3, rscratch1);

#ifdef ASSERT
  {
    // Check for lock-stack underflow.
    Label stack_ok;
    ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
    cmpw(t1, (unsigned)LockStack::start_offset());
    br(Assembler::GE, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
#endif

  Label unlocked, push_and_slow;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Check if obj is top of lock-stack.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  subw(top, top, oopSize);
  ldr(t, Address(rthread, top));
  cmp(obj, t);
  br(Assembler::NE, slow);

  // Pop lock-stack.
  DEBUG_ONLY(str(zr, Address(rthread, top));)
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));

  // Check if recursive.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, unlocked);
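  // (If the new top still holds obj, the entry just popped was a recursive
  // acquisition, and the pop alone completes the unlock.)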

  // Not recursive. Check header for monitor (0b10).
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(t, mark, markWord::unlocked_value);
  cmpxchg(obj, mark, t, Assembler::xword,
          /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
  br(Assembler::EQ, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
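  // (The slow path expects the lock-stack in its pre-unlock state, so the
  // entry popped above is pushed back before calling out.)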
  DEBUG_ONLY(str(obj, Address(rthread, top));)
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  b(slow);

  bind(unlocked);
}