1 /*
  2  * Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
 26 #define SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
 27 
 28 #include "gc/parallel/psCompactionManager.hpp"
 29 
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc/parallel/parMarkBitMap.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psStringDedup.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/partialArrayTaskStepper.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
 45 
// Apply the closure to one (possibly narrow) oop slot: delegate to the
// owning compaction manager, which marks the referenced object (if not
// already marked) and pushes it for tracing.
template <typename T>
inline void PCMarkAndPushClosure::do_oop_work(T* p) {
  _compaction_manager->mark_and_push(p);
}
 50 
// Try to steal a marking task from some other worker's queue (work
// stealing for load balancing). Returns true and sets 't' on success.
inline bool ParCompactionManager::steal(int queue_num, ScannerTask& t) {
  return marking_stacks()->steal(queue_num, t);
}
 54 
// Try to steal a region index from some other worker's region task queue.
// Returns true and sets 'region' on success.
inline bool ParCompactionManager::steal(int queue_num, size_t& region) {
  return region_task_queues()->steal(queue_num, region);
}
 58 
// Push a region index onto this worker's region stack for later filling.
void ParCompactionManager::push_region(size_t index)
{
#ifdef ASSERT
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParallelCompactData::RegionData* const region_ptr = sd.region(index);
  // A region must be claimed before it is pushed, and pushed exactly once
  // (the _pushed counter exists only in ASSERT builds).
  assert(region_ptr->claimed(), "must be claimed");
  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
#endif
  region_stack()->push(index);
}
 69 
// Mark the object referenced by slot *p and, if this worker is the one
// that marked it, do the per-object bookkeeping and push it on the marking
// stack for later tracing. A null reference is a no-op.
template <typename T>
inline void ParCompactionManager::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }

  oop obj = CompressedOops::decode_not_null(heap_oop);
  // mark_obj() returns false when another worker won the race to mark this
  // object; only the winning worker proceeds, so each object is processed
  // exactly once.
  if (!mark_bitmap()->mark_obj(obj)) {
    // Marked by another worker.
    return;
  }

  // Record eligible java.lang.String instances for deduplication.
  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      psStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests.add(obj);
  }

  ContinuationGCSupport::transform_stack_chunk(obj);

  // Account the object's live words to the region(s) it covers, then defer
  // tracing of its contents by queueing it as a marking task.
  _marking_stats_cache->push(obj, obj->size());
  marking_stack()->push(ScannerTask(obj));
}
 94 
// Drain this worker's own marking stacks; then, when running with a
// terminator (parallel phase), keep busy by stealing marking work from
// other workers until termination.
inline void ParCompactionManager::FollowStackClosure::do_void() {
  _compaction_manager->follow_marking_stacks();
  if (_terminator != nullptr) {
    steal_marking_work(*_terminator, _worker_id);
  }
}
101 
// Trace the element range [start, end) of an object array, applying the
// mark-and-push closure to each reference slot in the range.
inline void ParCompactionManager::follow_array(objArrayOop obj, size_t start, size_t end) {
  // checked_cast asserts that the size_t indices fit in the iterator's
  // int parameters.
  obj->oop_iterate_elements_range(&_mark_and_push_closure,
                                  checked_cast<int>(start),
                                  checked_cast<int>(end));
}
107 
// Trace the contents of one marking task. A task is either a chunk of a
// large reference array (encoded as a PartialArrayState) or a whole
// object. 'stolen' indicates the task was taken from another worker.
inline void ParCompactionManager::follow_contents(const ScannerTask& task, bool stolen) {
  if (task.is_partial_array_state()) {
    assert(PSParallelCompact::mark_bitmap()->is_marked(task.to_partial_array_state()->source()), "should be marked");
    process_array_chunk(task.to_partial_array_state(), stolen);
  } else {
    oop obj = task.to_oop();
    assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
    // Arrays containing references are split into chunks so multiple
    // workers can scan them in parallel; other objects are scanned whole.
    // NOTE(review): mainline JDK uses obj->is_objArray() here — confirm
    // is_array_with_oops() is the intended predicate in this tree.
    if (obj->is_array_with_oops()) {
      push_objArray((objArrayOop)obj);
    } else {
      obj->oop_iterate(&_mark_and_push_closure);
    }
  }
}
122 
123 inline void ParCompactionManager::MarkingStatsCache::push(size_t region_id, size_t live_words) {
124   size_t index = (region_id & entry_mask);
125   if (entries[index].region_id == region_id) {
126     // Hit
127     entries[index].live_words += live_words;
128     return;
129   }
130   // Miss
131   if (entries[index].live_words != 0) {
132     evict(index);
133   }
134   entries[index].region_id = region_id;
135   entries[index].live_words = live_words;
136 }
137 
138 inline void ParCompactionManager::MarkingStatsCache::push(oop obj, size_t live_words) {
139   ParallelCompactData& data = PSParallelCompact::summary_data();
140   const size_t region_size = ParallelCompactData::RegionSize;
141 
142   HeapWord* addr = cast_from_oop<HeapWord*>(obj);
143   const size_t start_region_id = data.addr_to_region_idx(addr);
144   const size_t end_region_id = data.addr_to_region_idx(addr + live_words - 1);
145   if (start_region_id == end_region_id) {
146     // Completely inside this region
147     push(start_region_id, live_words);
148     return;
149   }
150 
151   // First region
152   push(start_region_id, region_size - data.region_offset(addr));
153 
154   // Middle regions; bypass cache
155   for (size_t i = start_region_id + 1; i < end_region_id; ++i) {
156     data.region(i)->set_partial_obj_size(region_size);
157     data.region(i)->set_partial_obj_addr(addr);
158   }
159 
160   // Last region; bypass cache
161   const size_t end_offset = data.region_offset(addr + live_words - 1);
162   data.region(end_region_id)->set_partial_obj_size(end_offset + 1);
163   data.region(end_region_id)->set_partial_obj_addr(addr);
164 }
165 
// Flush one cache slot's accumulated live-word count into the global
// region table. Does not clear the slot; callers reset or overwrite it.
inline void ParCompactionManager::MarkingStatsCache::evict(size_t index) {
  ParallelCompactData& data = PSParallelCompact::summary_data();
  // flush to global data
  data.region(entries[index].region_id)->add_live_obj(entries[index].live_words);
}
171 
172 inline void ParCompactionManager::MarkingStatsCache::evict_all() {
173   for (size_t i = 0; i < num_entries; ++i) {
174     if (entries[i].live_words != 0) {
175       evict(i);
176       entries[i].live_words = 0;
177     }
178   }
179 }
180 
// Allocate this worker's marking-stats cache. Must not already exist;
// paired with flush_and_destroy_marking_stats_cache().
inline void ParCompactionManager::create_marking_stats_cache() {
  assert(_marking_stats_cache == nullptr, "precondition");
  _marking_stats_cache = new MarkingStatsCache();
}
185 
// Flush all cached per-region live-word counts to the global summary
// data, then free the cache and null the pointer so a later create sees
// its precondition satisfied.
inline void ParCompactionManager::flush_and_destroy_marking_stats_cache() {
  _marking_stats_cache->evict_all();
  delete _marking_stats_cache;
  _marking_stats_cache = nullptr;
}
191 #endif // SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP