src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
+ #include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+ #include "utilities/globalDefinitions.hpp"
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case) {
const int aligned_mask = BytesPerWord -1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");

if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(hdr, obj, rscratch1);
movl(hdr, Address(hdr, Klass::access_flags_offset()));
testl(hdr, JVM_ACC_IS_VALUE_BASED_CLASS);
jcc(Assembler::notZero, slow_case);
}
- // Load object header
- movptr(hdr, Address(obj, hdr_offset));
-
if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
const Register thread = r15_thread;
#else
const Register thread = disp_hdr;
get_thread(thread);
#endif
lightweight_lock(obj, hdr, thread, tmp, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
+ // Load object header
+ movptr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
orptr(hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
movptr(Address(disp_hdr, 0), hdr);
// test if object header is still the same (i.e. unlocked), and if so, store the
// displaced header address in the object header - if it is not the same, get the
// object header instead

void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
// load object
movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
verify_oop(obj);
if (LockingMode == LM_LIGHTWEIGHT) {
- movptr(disp_hdr, Address(obj, hdr_offset));
- andptr(disp_hdr, ~(int32_t)markWord::lock_mask_in_place);
- lightweight_unlock(obj, disp_hdr, hdr, slow_case);
+ #ifdef _LP64
+ lightweight_unlock(obj, disp_hdr, r15_thread, hdr, slow_case);
+ #else
+ // This relies on the implementation of lightweight_unlock being able to handle
+ // that the reg_rax and thread Register parameters may alias each other.
+ get_thread(disp_hdr);
+ lightweight_unlock(obj, disp_hdr, disp_hdr, hdr, slow_case);
+ #endif
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to
// the displaced header, get the object header instead
MacroAssembler::lock(); // must be immediately before cmpxchg!
cmpxchgptr(hdr, Address(obj, hdr_offset));
// if the object header was not pointing to the displaced header,
// we do unlocking via runtime call
jcc(Assembler::notEqual, slow_case);
}
}
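
For orientation, here is a minimal host-side C++ sketch of the displaced-header protocol that the LM_LEGACY paths above implement in assembly. The constants mirror HotSpot's mark-word encoding (low two bits 0b01 = unlocked); Object, LockRecord, try_stack_lock and stack_unlock are hypothetical names for illustration, not HotSpot API.

#include <atomic>
#include <cstdint>

// Mark-word lock bits as HotSpot encodes them (markWord.hpp).
constexpr uintptr_t unlocked_value = 0x1;  // 0b01: unlocked

struct Object     { std::atomic<uintptr_t> mark; };
struct LockRecord { uintptr_t displaced_header; };  // the on-stack BasicLock slot

// lock_object, LM_LEGACY: save an "unlocked" copy of the header in the stack
// record, then CAS the record's address into the object header. On success the
// header's low two bits are 00, which is the "locked" encoding.
bool try_stack_lock(Object* obj, LockRecord* rec) {
  uintptr_t hdr = obj->mark.load() | unlocked_value;  // orptr(hdr, unlocked_value)
  rec->displaced_header = hdr;                        // movptr(Address(disp_hdr, 0), hdr)
  // lock; cmpxchgptr(disp_hdr, Address(obj, hdr_offset))
  return obj->mark.compare_exchange_strong(hdr, reinterpret_cast<uintptr_t>(rec));
  // (the real code additionally recognizes recursive locking when the CAS
  // fails and the observed header points into the current thread's stack)
}

// unlock_object, LM_LEGACY: CAS the displaced header back; if the header no
// longer points at our record, the slow path (runtime call) takes over.
bool stack_unlock(Object* obj, LockRecord* rec) {
  uintptr_t expected = reinterpret_cast<uintptr_t>(rec);
  return obj->mark.compare_exchange_strong(expected, rec->displaced_header);
}

int main() {
  Object o{unlocked_value};
  LockRecord rec{};
  return (try_stack_lock(&o, &rec) && stack_unlock(&o, &rec)) ? 0 : 1;
}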
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
- assert_different_registers(obj, klass, len);
- movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
+ assert_different_registers(obj, klass, len, t1, t2);
#ifdef _LP64
- if (UseCompressedClassPointers) { // Take care not to kill klass
+ if (UseCompactObjectHeaders) {
+ movptr(t1, Address(klass, Klass::prototype_header_offset()));
+ movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
+ } else if (UseCompressedClassPointers) { // Take care not to kill klass
+ movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
movptr(t1, klass);
encode_klass_not_null(t1, rscratch1);
movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
} else
#endif
{
+ movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
}
if (len->is_valid()) {
movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
}
#ifdef _LP64
- else if (UseCompressedClassPointers) {
+ else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
xorptr(t1, t1);
store_klass_gap(obj, t1);
}
#endif
}
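
The three shapes initialize_header() can emit are easiest to see side by side. A small stand-alone C++ sketch, assuming the usual x86-64 HotSpot offsets (HeaderLayout and the byte counts are illustrative, not HotSpot API):

#include <cstdio>

// Hypothetical summary of what initialize_header() emits on x86-64,
// depending on the header flags.
struct HeaderLayout {
  const char* mode;
  int mark_bytes;       // mark word written at offset 0
  int klass_bytes;      // separate klass field (0 if encoded in the mark)
  int klass_gap_bytes;  // padding gap that must be zeroed
};

int main() {
  const HeaderLayout layouts[] = {
    // +UseCompactObjectHeaders: Klass::prototype_header() already carries the
    // compressed klass in the mark word, so one 8-byte store initializes both.
    {"compact headers",             8, 0, 0},
    // +UseCompressedClassPointers: 8-byte mark, 4-byte narrow klass, 4-byte gap
    // (the gap is what store_klass_gap() clears above).
    {"compressed class pointers",   8, 4, 4},
    // -UseCompressedClassPointers: 8-byte mark, 8-byte klass, no gap.
    {"uncompressed class pointers", 8, 8, 0},
  };
  for (const HeaderLayout& l : layouts) {
    std::printf("%-28s header = %d bytes\n",
                l.mode, l.mark_bytes + l.klass_bytes + l.klass_gap_bytes);
  }
  return 0;
}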
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, bool is_tlab_allocated) {
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
"con_size_in_bytes is not multiple of alignment");
const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
+ assert(!UseCompactObjectHeaders || hdr_size_in_bytes == 8, "check object headers size");
initialize_header(obj, klass, noreg, t1, t2);
if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
// clear rest of allocated space
}
verify_oop(obj);
}
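
The elided clearing code is guarded by the condition above; a tiny sketch of that decision, with needs_explicit_clear as a hypothetical helper name:

#include <cstdio>

// Mirrors the guard in initialize_object(): explicit zeroing of the body is
// only emitted when the memory is not already known to be zero.
bool needs_explicit_clear(bool use_tlab, bool zero_tlab, bool is_tlab_allocated) {
  // TLABs that are pre-zeroed (ZeroTLAB) hand out memory that is already
  // cleared, so the allocation fast path can skip the zeroing loop.
  return !(use_tlab && zero_tlab && is_tlab_allocated);
}

int main() {
  std::printf("pre-zeroed TLAB alloc: %s\n",
              needs_explicit_clear(true, true, true) ? "clear" : "skip clear");
  std::printf("non-TLAB alloc:        %s\n",
              needs_explicit_clear(true, true, false) ? "clear" : "skip clear");
  return 0;
}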
- void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) {
+ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case) {
assert(obj == rax, "obj must be in rax, for cmpxchg");
assert_different_registers(obj, len, t1, t2, klass);
// determine alignment mask
assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
cmpptr(len, checked_cast<int32_t>(max_array_allocation_length));
jcc(Assembler::above, slow_case);
const Register arr_size = t2; // okay to be the same
// align object end
- movptr(arr_size, header_size * BytesPerWord + MinObjAlignmentInBytesMask);
+ movptr(arr_size, base_offset_in_bytes + MinObjAlignmentInBytesMask);
lea(arr_size, Address(arr_size, len, f));
andptr(arr_size, ~MinObjAlignmentInBytesMask);
try_allocate(obj, arr_size, 0, t1, t2, slow_case);
initialize_header(obj, klass, len, t1, t2);
+ // Clear leading 4 bytes, if necessary.
+ // TODO: This could perhaps go into initialize_body() and also clear the leading 4 bytes
+ // for non-array objects, thereby replacing the klass-gap clearing code in initialize_header().
+ int base_offset = base_offset_in_bytes;
+ #ifdef _LP64
+ if (!is_aligned(base_offset, BytesPerWord)) {
+ assert(is_aligned(base_offset, BytesPerInt), "must be 4-byte aligned");
+ movl(Address(obj, base_offset), 0);
+ base_offset += BytesPerInt;
+ }
+ #endif
+ assert(is_aligned(base_offset, BytesPerWord), "must be word aligned");
+
// clear rest of allocated space
const Register len_zero = len;
- initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);
+ initialize_body(obj, arr_size, base_offset, len_zero);
if (CURRENT_ENV->dtrace_alloc_probes()) {
assert(obj == rax, "must be");
call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
}
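
A small stand-alone C++ sketch of the size arithmetic that allocate_array() performs now that it takes base_offset_in_bytes rather than a word-scaled header_size; array_size_in_bytes is a hypothetical helper, and the int[] offsets assume compact object headers on LP64:

#include <cassert>
#include <cstdio>

constexpr int BytesPerWord = 8;  // LP64
constexpr int BytesPerInt  = 4;
constexpr int MinObjAlignmentInBytes     = 8;
constexpr int MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;

constexpr bool is_aligned(int value, int alignment) {
  return (value & (alignment - 1)) == 0;
}

// arr_size = align_up(base_offset + len * element_size, MinObjAlignment),
// matching the movptr/lea/andptr sequence in allocate_array().
constexpr int array_size_in_bytes(int base_offset, int len, int elem_size) {
  return (base_offset + len * elem_size + MinObjAlignmentInBytesMask)
         & ~MinObjAlignmentInBytesMask;
}

int main() {
  // int[] with compact headers: 8-byte mark (klass inside) + 4-byte length,
  // so the payload starts at offset 12 - 4-byte aligned but not word aligned.
  int base_offset = 12;
  int len = 3;
  std::printf("allocated size = %d bytes\n",
              array_size_in_bytes(base_offset, len, BytesPerInt));  // 24

  // The zeroing loop works on whole words, so a 4-byte-aligned payload start
  // gets its first 4 bytes cleared separately, as in the
  // "Clear leading 4 bytes" block above.
  if (!is_aligned(base_offset, BytesPerWord)) {
    assert(is_aligned(base_offset, BytesPerInt));
    std::printf("clear 4 bytes at offset %d, start word-zeroing at %d\n",
                base_offset, base_offset + BytesPerInt);
  }
  return 0;
}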
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
verify_oop(receiver);
// explicit null check not needed since load from [klass_offset] causes a trap
- // check against inline cache
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
+ // check against inline cache; that no explicit null check is needed here
+ // is verified in Universe::genesis().
int start_offset = offset();
if (UseCompressedClassPointers) {
load_klass(rscratch1, receiver, rscratch2);
cmpptr(rscratch1, iCache);