1 /*
2 * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
26 #define SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
27
28 #include "gc/parallel/psCompactionManager.hpp"
29
30 #include "classfile/classLoaderData.hpp"
31 #include "classfile/javaClasses.inline.hpp"
32 #include "gc/parallel/parMarkBitMap.hpp"
33 #include "gc/parallel/psParallelCompact.inline.hpp"
34 #include "gc/parallel/psStringDedup.hpp"
35 #include "gc/shared/partialArrayState.hpp"
36 #include "gc/shared/partialArrayTaskStepper.inline.hpp"
37 #include "gc/shared/taskqueue.inline.hpp"
38 #include "oops/access.inline.hpp"
39 #include "oops/arrayOop.hpp"
40 #include "oops/compressedOops.inline.hpp"
41 #include "oops/objArrayOop.inline.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "utilities/debug.hpp"
44 #include "utilities/globalDefinitions.hpp"
45
// Oop-field visitor used during the marking phase.  T is either oop or
// narrowOop; the closure simply forwards to the owning compaction manager,
// which marks the referent and pushes it for later scanning if needed.
template <typename T>
inline void PCMarkAndPushClosure::do_oop_work(T* p) {
  _compaction_manager->mark_and_push(p);
}
50
// Work-stealing for the marking phase: try to take a marking task from
// another worker's queue.  Returns true and stores the task in t on success.
inline bool ParCompactionManager::steal(int queue_num, ScannerTask& t) {
  return marking_stacks()->steal(queue_num, t);
}
54
// Work-stealing for the compaction phase: try to take a region index from
// another worker's region task queue.
inline bool ParCompactionManager::steal(int queue_num, size_t& region) {
  return region_task_queues()->steal(queue_num, region);
}
58
59 inline void ParCompactionManager::push(oop obj) {
60 marking_stack()->push(ScannerTask(obj));
61 }
62
63 inline void ParCompactionManager::push(PartialArrayState* stat) {
64 marking_stack()->push(ScannerTask(stat));
65 }
66
// Push a claimed region index on this worker's region stack for the
// compaction phase.
void ParCompactionManager::push_region(size_t index)
{
#ifdef ASSERT
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParallelCompactData::RegionData* const region_ptr = sd.region(index);
  // A region must be claimed before it is pushed, and pushed at most once.
  // Note: _pushed++ is a debug-only side effect that records the push.
  assert(region_ptr->claimed(), "must be claimed");
  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
#endif
  region_stack()->push(index);
}
77
// Load the (possibly compressed) reference at p; if it is non-null and the
// referent is not yet marked, mark it, record its live size in the
// per-worker marking stats cache, and push it for later scanning.
template <typename T>
inline void ParCompactionManager::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

    // Everything below runs only when this call marked obj (mark_obj()
    // returned true), so per-object work is not duplicated.
    if (mark_bitmap()->mark_obj(obj)) {
      // Register newly marked String instances as dedup candidates.
      if (StringDedup::is_enabled() &&
          java_lang_String::is_instance(obj) &&
          psStringDedup::is_candidate_from_mark(obj)) {
        _string_dedup_requests.add(obj);
      }

      ContinuationGCSupport::transform_stack_chunk(obj);

      // Account the object's live words to the region(s) it spans.
      assert(_marking_stats_cache != nullptr, "inv");
      _marking_stats_cache->push(obj, obj->size());
      push(obj);
    }
  }
}
100
// Drain this worker's marking stacks; then, if a terminator was supplied,
// keep stealing marking work from other workers until termination.
inline void ParCompactionManager::FollowStackClosure::do_void() {
  _compaction_manager->follow_marking_stacks();
  if (_terminator != nullptr) {
    steal_marking_work(*_terminator, _worker_id);
  }
}
107
108 template <typename T>
109 inline void follow_array_specialized(objArrayOop obj, size_t start, size_t end, ParCompactionManager* cm) {
110 assert(start <= end, "invariant");
111 T* const base = (T*)obj->base();
112 T* const beg = base + start;
113 T* const chunk_end = base + end;
114
115 // Push the non-null elements of the next stride on the marking stack.
116 for (T* e = beg; e < chunk_end; e++) {
117 cm->mark_and_push<T>(e);
118 }
119 }
120
121 inline void ParCompactionManager::follow_array(objArrayOop obj, size_t start, size_t end) {
122 if (UseCompressedOops) {
123 follow_array_specialized<narrowOop>(obj, start, end, this);
124 } else {
125 follow_array_specialized<oop>(obj, start, end, this);
126 }
127 }
128
// Process one marking task: either resume scanning a chunk of a large
// reference array (partial-array task) or scan a whole object's fields.
// 'stolen' indicates the task was obtained from another worker's queue.
inline void ParCompactionManager::follow_contents(const ScannerTask& task, bool stolen) {
  if (task.is_partial_array_state()) {
    assert(PSParallelCompact::mark_bitmap()->is_marked(task.to_partial_array_state()->source()), "should be marked");
    process_array_chunk(task.to_partial_array_state(), stolen);
  } else {
    oop obj = task.to_oop();
    assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
    if (obj->is_refArray()) {
      // Reference arrays are scanned in chunks so large arrays can be
      // split among workers.
      push_objArray(obj);
    } else {
      obj->oop_iterate(&_mark_and_push_closure);
    }
  }
}
143
144 inline void ParCompactionManager::MarkingStatsCache::push(size_t region_id, size_t live_words) {
145 size_t index = (region_id & entry_mask);
146 if (entries[index].region_id == region_id) {
147 // Hit
148 entries[index].live_words += live_words;
149 return;
150 }
151 // Miss
152 if (entries[index].live_words != 0) {
153 evict(index);
154 }
155 entries[index].region_id = region_id;
156 entries[index].live_words = live_words;
157 }
158
// Account obj's live_words to every region the object spans.  Only the
// leading part (in the object's first region) goes through the cache; the
// middle and last regions are recorded directly in the global summary data
// as partial-object coverage.
inline void ParCompactionManager::MarkingStatsCache::push(oop obj, size_t live_words) {
  ParallelCompactData& data = PSParallelCompact::summary_data();
  const size_t region_size = ParallelCompactData::RegionSize;

  HeapWord* addr = cast_from_oop<HeapWord*>(obj);
  const size_t start_region_id = data.addr_to_region_idx(addr);
  // Region of the last live word of the object.
  const size_t end_region_id = data.addr_to_region_idx(addr + live_words - 1);
  if (start_region_id == end_region_id) {
    // Completely inside this region
    push(start_region_id, live_words);
    return;
  }

  // First region: only the words from the object's start to the region end.
  push(start_region_id, region_size - data.region_offset(addr));

  // Middle regions; bypass cache
  for (size_t i = start_region_id + 1; i < end_region_id; ++i) {
    data.region(i)->set_partial_obj_size(region_size);
    data.region(i)->set_partial_obj_addr(addr);
  }

  // Last region; bypass cache
  const size_t end_offset = data.region_offset(addr + live_words - 1);
  data.region(end_region_id)->set_partial_obj_size(end_offset + 1);
  data.region(end_region_id)->set_partial_obj_addr(addr);
}
186
// Flush one cache entry's accumulated live-word count to the global
// per-region summary data.  Does not reset the entry; the caller either
// overwrites it (push) or clears it (evict_all).
inline void ParCompactionManager::MarkingStatsCache::evict(size_t index) {
  ParallelCompactData& data = PSParallelCompact::summary_data();
  // flush to global data
  data.region(entries[index].region_id)->add_live_obj(entries[index].live_words);
}
192
193 inline void ParCompactionManager::MarkingStatsCache::evict_all() {
194 for (size_t i = 0; i < num_entries; ++i) {
195 if (entries[i].live_words != 0) {
196 evict(i);
197 entries[i].live_words = 0;
198 }
199 }
200 }
201
// Allocate this worker's marking stats cache.  Intended to be balanced by
// a later call to flush_and_destroy_marking_stats_cache().
inline void ParCompactionManager::create_marking_stats_cache() {
  assert(_marking_stats_cache == nullptr, "precondition");
  _marking_stats_cache = new MarkingStatsCache();
}
206
207 inline void ParCompactionManager::flush_and_destroy_marking_stats_cache() {
208 _marking_stats_cache->evict_all();
209 delete _marking_stats_cache;
210 _marking_stats_cache = nullptr;
211 }
212 #endif // SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP