/*
 * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "gc/z/zObjArrayAllocator.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "oops/arrayKlass.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "utilities/debug.hpp"

ZObjArrayAllocator::ZObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero, Thread* thread)
  : ObjArrayAllocator(klass, word_size, length, do_zero, thread) {}

void ZObjArrayAllocator::yield_for_safepoint() const {
  ThreadBlockInVM tbivm(JavaThread::cast(_thread));
}

oop ZObjArrayAllocator::initialize(HeapWord* mem) const {
  // ZGC specializes the initialization by performing segmented clearing
  // to allow shorter time-to-safepoints.

  if (!_do_zero) {
    // No need for ZGC specialization
    return ObjArrayAllocator::initialize(mem);
  }

  // A max segment size of 64K was chosen because microbenchmarking
  // suggested that it offered a good trade-off between allocation
  // time and time-to-safepoint
  const size_t segment_max = ZUtils::bytes_to_words(64 * K);

  if (_word_size <= segment_max) {
    // Too small to use segmented clearing
    return ObjArrayAllocator::initialize(mem);
  }

  // Segmented clearing

  // The array is going to be exposed before it has been completely
  // cleared, therefore we can't expose the header at the end of this
  // function. Instead explicitly initialize it according to our needs.

  // Signal to the ZIterator that this is an invisible root, by setting
  // the mark word to "marked". Reset to prototype() after the clearing.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, _klass->prototype_header().set_marked());
  } else {
    arrayOopDesc::set_mark(mem, markWord::prototype().set_marked());
    arrayOopDesc::release_set_klass(mem, _klass);
  }
  assert(_length >= 0, "length should be non-negative");
  arrayOopDesc::set_length(mem, _length);

  // Keep the array alive across safepoints through an invisible
  // root. Invisible roots are not visited by the heap iterator
  // and the marking logic will not attempt to follow its elements.
  // Relocation and remembered set code know how to dodge iterating
  // over such objects.
  ZThreadLocalData::set_invisible_root(_thread, (zaddress_unsafe*)&mem);

  const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
  const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type);
  const size_t process_start_offset_in_bytes = align_up(base_offset_in_bytes, BytesPerWord);

  if (process_start_offset_in_bytes != base_offset_in_bytes) {
    // initialize_memory can only fill word-aligned memory;
    // fill the first 4 bytes here.
    assert(process_start_offset_in_bytes - base_offset_in_bytes == 4, "Must be 4-byte aligned");
    assert(!is_reference_type(element_type), "Only TypeArrays can be 4-byte aligned");
    *reinterpret_cast<int*>(reinterpret_cast<char*>(mem) + base_offset_in_bytes) = 0;
  }

  // Note: initialize_memory may clear padding bytes at the end
  const size_t process_start_offset = ZUtils::bytes_to_words(process_start_offset_in_bytes);
  const size_t process_size = _word_size - process_start_offset;

  uint32_t old_seqnum_before = ZGeneration::old()->seqnum();
  uint32_t young_seqnum_before = ZGeneration::young()->seqnum();
  uintptr_t color_before = ZPointerStoreGoodMask;
  auto gc_safepoint_happened = [&]() {
    return old_seqnum_before != ZGeneration::old()->seqnum() ||
           young_seqnum_before != ZGeneration::young()->seqnum() ||
           color_before != ZPointerStoreGoodMask;
  };

  bool seen_gc_safepoint = false;

  auto initialize_memory = [&]() {
    for (size_t processed = 0; processed < process_size; processed += segment_max) {
      // Clear segment
      uintptr_t* const start = (uintptr_t*)(mem + process_start_offset + processed);
      const size_t remaining = process_size - processed;
      const size_t segment = MIN2(remaining, segment_max);
      // Usually, the young marking code has the responsibility to color
      // raw nulls, before they end up in the old generation. However, the
      // invisible roots are hidden from the marking code, and therefore
      // we must color the nulls already here in the initialization. The
      // color we choose must be store bad for any subsequent stores, regardless
      // of how many GC flips later it will arrive. That's why we OR in 11
      // (ZPointerRememberedMask) in the remembered bits, similar to how
      // forgotten old oops also have 11, for the very same reason.
      // However, we opportunistically try to color without the 11 remembered
      // bits, hoping to not get interrupted in the middle of a GC safepoint.
      // Most of the time, we manage to do that, and can then avoid having GC
      // barriers trigger slow paths for this.
      const uintptr_t colored_null = seen_gc_safepoint ? (ZPointerStoreGoodMask | ZPointerRememberedMask)
                                                       : ZPointerStoreGoodMask;
      const uintptr_t fill_value = is_reference_type(element_type) ? colored_null : 0;
      ZUtils::fill(start, segment, fill_value);

      // Safepoint
      yield_for_safepoint();

      // Deal with safepoints
      if (is_reference_type(element_type) && !seen_gc_safepoint && gc_safepoint_happened()) {
        // The first time we observe a GC safepoint in the yield point,
        // we have to restart processing with 11 remembered bits.
        seen_gc_safepoint = true;
        return false;
      }
    }
    return true;
  };

  if (!initialize_memory()) {
    // Re-color with 11 remset bits if we got intercepted by a GC safepoint
    const bool result = initialize_memory();
    assert(result, "Array initialization should always succeed the second time");
  }

  mem_zap_end_padding(mem);

  ZThreadLocalData::clear_invisible_root(_thread);

  // Signal to the ZIterator that this is no longer an invisible root
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, _klass->prototype_header());
  } else {
    oopDesc::release_set_mark(mem, markWord::prototype());
  }

  return cast_to_oop(mem);
}
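
// The control flow in initialize() boils down to a generic pattern: fill a
// large range in bounded chunks, yield between chunks, and restart once with
// a more conservative fill value if a phase change (here: a GC safepoint) was
// observed along the way. The sketch below is a minimal, standalone
// illustration of that pattern only; it is not part of HotSpot, and all names
// in it (segmented_fill_sketch, fill_segmented, clear_in_segments, the yield
// and phase_changed callables) are hypothetical stand-ins for ZUtils::fill,
// yield_for_safepoint() and gc_safepoint_happened() above.
namespace segmented_fill_sketch {

// Fills 'count' words with 'value' in chunks of at most 'chunk' words. After
// each chunk it calls 'yield' and, when 'abort_on_change' is set, returns
// false as soon as 'phase_changed' reports a phase shift, so that the caller
// can restart with a conservative fill value.
template <typename Yield, typename PhaseChanged>
bool fill_segmented(uintptr_t* words, size_t count, size_t chunk, uintptr_t value,
                    bool abort_on_change, Yield yield, PhaseChanged phase_changed) {
  for (size_t done = 0; done < count; done += chunk) {
    const size_t n = (count - done < chunk) ? (count - done) : chunk;
    for (size_t i = 0; i < n; i++) {
      words[done + i] = value;
    }
    yield();
    if (abort_on_change && phase_changed()) {
      return false;
    }
  }
  return true;
}

// Usage mirroring initialize(): one optimistic pass that may be aborted by a
// phase change, followed by at most one conservative pass that always runs to
// completion.
template <typename Yield, typename PhaseChanged>
void clear_in_segments(uintptr_t* words, size_t count, uintptr_t optimistic,
                       uintptr_t conservative, Yield yield, PhaseChanged phase_changed) {
  const size_t chunk = (64 * K) / BytesPerWord; // mirrors segment_max above
  if (!fill_segmented(words, count, chunk, optimistic, true, yield, phase_changed)) {
    fill_segmented(words, count, chunk, conservative, false, yield, phase_changed);
  }
}

} // namespace segmented_fill_sketch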