// src/hotspot/share/gc/z/zObjArrayAllocator.cpp
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zObjArrayAllocator.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "oops/arrayKlass.hpp"
+ #include "runtime/arguments.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "utilities/debug.hpp"
+ #include "utilities/globalDefinitions.hpp"
// Construction simply forwards to the shared ObjArrayAllocator; all
// ZGC-specific behavior lives in initialize() and yield_for_safepoint().
ZObjArrayAllocator::ZObjArrayAllocator(Klass* klass,
                                       size_t word_size,
                                       int length,
                                       bool do_zero,
                                       Thread* thread)
  : ObjArrayAllocator(klass, word_size, length, do_zero, thread) {}
void ZObjArrayAllocator::yield_for_safepoint() const {
ThreadBlockInVM tbivm(JavaThread::cast(_thread));
}
+ static bool is_oop_containing_flat_array(ArrayKlass* ak) {
+ return ak->is_flatArray_klass() &&
+ FlatArrayKlass::cast(ak)->contains_oops();
+ }
+
oop ZObjArrayAllocator::initialize(HeapWord* mem) const {
// ZGC specializes the initialization by performing segmented clearing
// to allow shorter time-to-safepoints.
if (!_do_zero) {
// suggested that it offered a good trade-off between allocation
// time and time-to-safepoint
const size_t segment_max = ZUtils::bytes_to_words(64 * K);
if (_word_size <= segment_max) {
! // Too small to use segmented clearing
return ObjArrayAllocator::initialize(mem);
}
// Segmented clearing
// The array is going to be exposed before it has been completely
// cleared, therefore we can't expose the header at the end of this
// function. Instead explicitly initialize it according to our needs.
// suggested that it offered a good trade-off between allocation
// time and time-to-safepoint
const size_t segment_max = ZUtils::bytes_to_words(64 * K);
if (_word_size <= segment_max) {
! // Too small to use segmented clearing
+ return ObjArrayAllocator::initialize(mem);
+ }
+
+ ArrayKlass* const ak = ArrayKlass::cast(_klass);
+
+ if (is_oop_containing_flat_array(ak)) {
+ // Flat arrays containing oops are not supported in ZGC without relying on
+ // internal-only features such as loose-consistency and null-restriction.
+ // A value object that contains an oop and a null-marker will always exceed
+ // 64 bits when using ZGC. As a result, such objects will not be flattened
+ // in practice due to the 64-bit atomicity limit.
+ //
+ // We only need to support flat arrays containing oops when/if value objects
+ // can be user-declared with loose-consistency and/or null-restriction.
return ObjArrayAllocator::initialize(mem);
}
+ const BasicType element_type = ak->element_type();
+
+ // Flat arrays containing oops are not supported and only contain primitives
+ // from here on out.
+ const bool is_oop_array = element_type != T_FLAT_ELEMENT &&
+ is_reference_type(element_type);
+
// Segmented clearing
// The array is going to be exposed before it has been completely
// cleared, therefore we can't expose the header at the end of this
// function. Instead explicitly initialize it according to our needs.
// Signal to the ZIterator that this is an invisible root, by setting
// the mark word to "marked". Reset to prototype() after the clearing.
if (UseCompactObjectHeaders) {
oopDesc::release_set_mark(mem, _klass->prototype_header().set_marked());
} else {
! arrayOopDesc::set_mark(mem, markWord::prototype().set_marked());
arrayOopDesc::release_set_klass(mem, _klass);
}
assert(_length >= 0, "length should be non-negative");
arrayOopDesc::set_length(mem, _length);
// Signal to the ZIterator that this is an invisible root, by setting
// the mark word to "marked". Reset to prototype() after the clearing.
if (UseCompactObjectHeaders) {
oopDesc::release_set_mark(mem, _klass->prototype_header().set_marked());
} else {
! if (Arguments::is_valhalla_enabled()) {
+ arrayOopDesc::set_mark(mem, _klass->prototype_header().set_marked());
+ } else {
+ arrayOopDesc::set_mark(mem, markWord::prototype().set_marked());
+ }
arrayOopDesc::release_set_klass(mem, _klass);
}
assert(_length >= 0, "length should be non-negative");
arrayOopDesc::set_length(mem, _length);
// and the marking logic will not attempt to follow its elements.
// Relocation and remembered set code know how to dodge iterating
// over such objects.
ZThreadLocalData::set_invisible_root(_thread, (zaddress_unsafe*)&mem);
- const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
const size_t base_offset_in_bytes = (size_t)arrayOopDesc::base_offset_in_bytes(element_type);
const size_t process_start_offset_in_bytes = align_up(base_offset_in_bytes, (size_t)BytesPerWord);
if (process_start_offset_in_bytes != base_offset_in_bytes) {
// initialize_memory can only fill word aligned memory,
// fill the first 4 bytes here.
assert(process_start_offset_in_bytes - base_offset_in_bytes == 4, "Must be 4-byte aligned");
! assert(!is_reference_type(element_type), "Only TypeArrays can be 4-byte aligned");
*reinterpret_cast<int*>(reinterpret_cast<char*>(mem) + base_offset_in_bytes) = 0;
}
// Note: initialize_memory may clear padding bytes at the end
const size_t process_start_offset = ZUtils::bytes_to_words(process_start_offset_in_bytes);
// and the marking logic will not attempt to follow its elements.
// Relocation and remembered set code know how to dodge iterating
// over such objects.
ZThreadLocalData::set_invisible_root(_thread, (zaddress_unsafe*)&mem);
const size_t base_offset_in_bytes = (size_t)arrayOopDesc::base_offset_in_bytes(element_type);
const size_t process_start_offset_in_bytes = align_up(base_offset_in_bytes, (size_t)BytesPerWord);
if (process_start_offset_in_bytes != base_offset_in_bytes) {
// initialize_memory can only fill word aligned memory,
// fill the first 4 bytes here.
assert(process_start_offset_in_bytes - base_offset_in_bytes == 4, "Must be 4-byte aligned");
! assert(!is_oop_array, "Only TypeArrays can be 4-byte aligned");
*reinterpret_cast<int*>(reinterpret_cast<char*>(mem) + base_offset_in_bytes) = 0;
}
// Note: initialize_memory may clear padding bytes at the end
const size_t process_start_offset = ZUtils::bytes_to_words(process_start_offset_in_bytes);
// bits, hoping to not get interrupted in the middle of a GC safepoint.
// Most of the time, we manage to do that, and can then avoid having GC
// barriers trigger slow paths for this.
const uintptr_t colored_null = seen_gc_safepoint ? (ZPointerStoreGoodMask | ZPointerRememberedMask)
: ZPointerStoreGoodMask;
! const uintptr_t fill_value = is_reference_type(element_type) ? colored_null : 0;
ZUtils::fill(start, segment, fill_value);
// Safepoint
yield_for_safepoint();
// Deal with safepoints
! if (is_reference_type(element_type) && !seen_gc_safepoint && gc_safepoint_happened()) {
// The first time we observe a GC safepoint in the yield point,
// we have to restart processing with 11 remembered bits.
seen_gc_safepoint = true;
return false;
}
// bits, hoping to not get interrupted in the middle of a GC safepoint.
// Most of the time, we manage to do that, and can then avoid having GC
// barriers trigger slow paths for this.
const uintptr_t colored_null = seen_gc_safepoint ? (ZPointerStoreGoodMask | ZPointerRememberedMask)
: ZPointerStoreGoodMask;
! const uintptr_t fill_value = is_oop_array ? colored_null : 0;
ZUtils::fill(start, segment, fill_value);
// Safepoint
yield_for_safepoint();
// Deal with safepoints
! if (is_oop_array && !seen_gc_safepoint && gc_safepoint_happened()) {
// The first time we observe a GC safepoint in the yield point,
// we have to restart processing with 11 remembered bits.
seen_gc_safepoint = true;
return false;
}
mem_zap_end_padding(mem);
ZThreadLocalData::clear_invisible_root(_thread);
// Signal to the ZIterator that this is no longer an invisible root
! if (UseCompactObjectHeaders) {
oopDesc::release_set_mark(mem, _klass->prototype_header());
} else {
oopDesc::release_set_mark(mem, markWord::prototype());
}
mem_zap_end_padding(mem);
ZThreadLocalData::clear_invisible_root(_thread);
// Signal to the ZIterator that this is no longer an invisible root
! if (UseCompactObjectHeaders || Arguments::is_valhalla_enabled()) {
oopDesc::release_set_mark(mem, _klass->prototype_header());
} else {
oopDesc::release_set_mark(mem, markWord::prototype());
}
< prev index next >