/*
 * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
#define SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP

#include "gc/parallel/psCompactionManager.hpp"

#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc/parallel/parMarkBitMap.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psStringDedup.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/partialArrayTaskStepper.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

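// Delegate to the owning compaction manager, which marks the referenced
// object (if not already marked) and pushes it onto the marking stack.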
template <typename T>
inline void PCMarkAndPushClosure::do_oop_work(T* p) {
  _compaction_manager->mark_and_push(p);
}

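// Try to steal a marking task from another worker's queue.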
inline bool ParCompactionManager::steal(int queue_num, ScannerTask& t) {
  return marking_stacks()->steal(queue_num, t);
}

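// Try to steal a compaction region index from another worker's queue.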
inline bool ParCompactionManager::steal(int queue_num, size_t& region) {
  return region_task_queues()->steal(queue_num, region);
}

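// Push a claimed region onto this worker's region stack; in debug builds,
// verify that the region is claimed and pushed at most once.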
inline void ParCompactionManager::push_region(size_t index) {
#ifdef ASSERT
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParallelCompactData::RegionData* const region_ptr = sd.region(index);
  assert(region_ptr->claimed(), "must be claimed");
  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
#endif
  region_stack()->push(index);
}

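// If *p refers to an unmarked object, mark it, record its size in the
// per-region marking stats cache, and push it onto the marking stack.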
template <typename T>
inline void ParCompactionManager::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }

  oop obj = CompressedOops::decode_not_null(heap_oop);
  if (!mark_bitmap()->mark_obj(obj)) {
    // Marked by another worker.
    return;
  }

  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      psStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests.add(obj);
  }

  ContinuationGCSupport::transform_stack_chunk(obj);

  _marking_stats_cache->push(obj, obj->size());
  marking_stack()->push(ScannerTask(obj));
}

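// Drain this worker's marking stacks; if a terminator was supplied, then
// steal marking work from other workers until termination.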
inline void ParCompactionManager::FollowStackClosure::do_void() {
  _compaction_manager->follow_marking_stacks();
  if (_terminator != nullptr) {
    steal_marking_work(*_terminator, _worker_id);
  }
}

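// Helper for follow_array(), specialized on the in-heap oop representation.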
template <typename T>
inline void follow_array_specialized(objArrayOop obj, size_t start, size_t end, ParCompactionManager* cm) {
  assert(start <= end, "invariant");
  T* const base = (T*)obj->base();
  T* const beg = base + start;
  T* const chunk_end = base + end;

  // Push the non-null elements of this chunk on the marking stack.
  for (T* e = beg; e < chunk_end; e++) {
    cm->mark_and_push<T>(e);
  }
}

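// Follow the elements in [start, end) of obj, dispatching on the heap's oop
// encoding (compressed or uncompressed).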
inline void ParCompactionManager::follow_array(objArrayOop obj, size_t start, size_t end) {
  if (UseCompressedOops) {
    follow_array_specialized<narrowOop>(obj, start, end, this);
  } else {
    follow_array_specialized<oop>(obj, start, end, this);
  }
}

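// Process one marking task: either a chunk of a large objArray (described by
// a PartialArrayState) or a complete object.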
inline void ParCompactionManager::follow_contents(const ScannerTask& task, bool stolen) {
  if (task.is_partial_array_state()) {
    assert(PSParallelCompact::mark_bitmap()->is_marked(task.to_partial_array_state()->source()), "should be marked");
    process_array_chunk(task.to_partial_array_state(), stolen);
  } else {
    oop obj = task.to_oop();
    assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
    if (obj->is_objArray()) {
      push_objArray(obj);
    } else {
      obj->oop_iterate(&_mark_and_push_closure);
    }
  }
}

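// Accumulate live words for region_id in the direct-mapped cache; on a miss,
// evict the slot's current occupant (if any) first.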
inline void ParCompactionManager::MarkingStatsCache::push(size_t region_id, size_t live_words) {
  size_t index = (region_id & entry_mask);
  if (entries[index].region_id == region_id) {
    // Hit
    entries[index].live_words += live_words;
    return;
  }
  // Miss
  if (entries[index].live_words != 0) {
    evict(index);
  }
  entries[index].region_id = region_id;
  entries[index].live_words = live_words;
}

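// Record the live words of obj in every region it spans: the part in the
// first region goes through the cache; regions the object extends into
// beyond the first are updated directly in the summary data.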
inline void ParCompactionManager::MarkingStatsCache::push(oop obj, size_t live_words) {
  ParallelCompactData& data = PSParallelCompact::summary_data();
  const size_t region_size = ParallelCompactData::RegionSize;

  HeapWord* addr = cast_from_oop<HeapWord*>(obj);
  const size_t start_region_id = data.addr_to_region_idx(addr);
  const size_t end_region_id = data.addr_to_region_idx(addr + live_words - 1);
  if (start_region_id == end_region_id) {
    // Completely inside this region
    push(start_region_id, live_words);
    return;
  }

  // First region
  push(start_region_id, region_size - data.region_offset(addr));

  // Middle regions; bypass cache
  for (size_t i = start_region_id + 1; i < end_region_id; ++i) {
    data.region(i)->set_partial_obj_size(region_size);
    data.region(i)->set_partial_obj_addr(addr);
  }

  // Last region; bypass cache
  const size_t end_offset = data.region_offset(addr + live_words - 1);
  data.region(end_region_id)->set_partial_obj_size(end_offset + 1);
  data.region(end_region_id)->set_partial_obj_addr(addr);
}

inline void ParCompactionManager::MarkingStatsCache::evict(size_t index) {
  ParallelCompactData& data = PSParallelCompact::summary_data();
  // flush to global data
  data.region(entries[index].region_id)->add_live_obj(entries[index].live_words);
}

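// Flush all non-empty cache entries to the global summary data.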
inline void ParCompactionManager::MarkingStatsCache::evict_all() {
  for (size_t i = 0; i < num_entries; ++i) {
    if (entries[i].live_words != 0) {
      evict(i);
      entries[i].live_words = 0;
    }
  }
}

inline void ParCompactionManager::create_marking_stats_cache() {
  assert(_marking_stats_cache == nullptr, "precondition");
  _marking_stats_cache = new MarkingStatsCache();
}

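// Flush any cached per-region live-word counts to the summary data, then
// release the cache.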
inline void ParCompactionManager::flush_and_destroy_marking_stats_cache() {
  _marking_stats_cache->evict_all();
  delete _marking_stats_cache;
  _marking_stats_cache = nullptr;
}

#endif // SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP