60 #include "gc/shared/oopStorageSetParState.inline.hpp"
61 #include "gc/shared/parallelCleaning.hpp"
62 #include "gc/shared/preservedMarks.inline.hpp"
63 #include "gc/shared/referencePolicy.hpp"
64 #include "gc/shared/referenceProcessor.hpp"
65 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
66 #include "gc/shared/spaceDecorator.hpp"
67 #include "gc/shared/taskTerminator.hpp"
68 #include "gc/shared/weakProcessor.inline.hpp"
69 #include "gc/shared/workerPolicy.hpp"
70 #include "gc/shared/workerThread.hpp"
71 #include "gc/shared/workerUtils.hpp"
72 #include "logging/log.hpp"
73 #include "memory/iterator.inline.hpp"
74 #include "memory/memoryReserver.hpp"
75 #include "memory/metaspaceUtils.hpp"
76 #include "memory/resourceArea.hpp"
77 #include "memory/universe.hpp"
78 #include "nmt/memTracker.hpp"
79 #include "oops/access.inline.hpp"
80 #include "oops/instanceClassLoaderKlass.inline.hpp"
81 #include "oops/instanceKlass.inline.hpp"
82 #include "oops/instanceMirrorKlass.inline.hpp"
83 #include "oops/methodData.hpp"
84 #include "oops/objArrayKlass.inline.hpp"
85 #include "oops/oop.inline.hpp"
86 #include "runtime/atomicAccess.hpp"
87 #include "runtime/handles.inline.hpp"
88 #include "runtime/java.hpp"
89 #include "runtime/safepoint.hpp"
90 #include "runtime/threads.hpp"
91 #include "runtime/vmThread.hpp"
92 #include "services/memoryService.hpp"
93 #include "utilities/align.hpp"
94 #include "utilities/debug.hpp"
95 #include "utilities/events.hpp"
96 #include "utilities/formatBuffer.hpp"
97 #include "utilities/macros.hpp"
98 #include "utilities/stack.inline.hpp"
99 #if INCLUDE_JVMCI
100 #include "jvmci/jvmci.hpp"
101 #endif
102
103 #include <math.h>
104
105 // All sizes are in HeapWords.
1441
1442 // Split [start, end) evenly for a number of workers and return the
1443 // range for worker_id.
1444 static void split_regions_for_worker(size_t start, size_t end,
1445 uint worker_id, uint num_workers,
1446 size_t* worker_start, size_t* worker_end) {
1447 assert(start < end, "precondition");
1448 assert(num_workers > 0, "precondition");
1449 assert(worker_id < num_workers, "precondition");
1450
1451 size_t num_regions = end - start;
1452 size_t num_regions_per_worker = num_regions / num_workers;
1453 size_t remainder = num_regions % num_workers;
1454 // The first few workers will get one extra.
1455 *worker_start = start + worker_id * num_regions_per_worker
1456 + MIN2(checked_cast<size_t>(worker_id), remainder);
1457 *worker_end = *worker_start + num_regions_per_worker
1458 + (worker_id < remainder ? 1 : 0);
1459 }
1460
1461 void PSParallelCompact::forward_to_new_addr() {
1462 GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1463 uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1464
1465 struct ForwardTask final : public WorkerTask {
1466 uint _num_workers;
1467
1468 explicit ForwardTask(uint num_workers) :
1469 WorkerTask("PSForward task"),
1470 _num_workers(num_workers) {}
1471
    // Walk the marked (live) objects in [start, end) and install forwarding
    // pointers so they will compact contiguously starting at 'destination'.
    // Nothing is copied here; only new addresses are recorded. An object
    // that would land at its current address is left untouched.
    static void forward_objs_in_range(ParCompactionManager* cm,
                                      HeapWord* start,
                                      HeapWord* end,
                                      HeapWord* destination) {
      HeapWord* cur_addr = start;
      HeapWord* new_addr = destination;

      while (cur_addr < end) {
        // Advance to the start of the next live object.
        cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
        if (cur_addr >= end) {
          return;
        }
        assert(mark_bitmap()->is_marked(cur_addr), "inv");
        oop obj = cast_to_oop(cur_addr);
        if (new_addr != cur_addr) {
          // The object moves: save the mark if it carries state that
          // forwarding would clobber, then record the destination.
          cm->preserved_marks()->push_if_necessary(obj, obj->mark());
          FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
        }
        size_t obj_size = obj->size();
        new_addr += obj_size;
        cur_addr += obj_size;
      }
    }
1495
1496 void work(uint worker_id) override {
1497 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1498 for (uint id = old_space_id; id < last_space_id; ++id) {
1499 MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1500 HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1501 HeapWord* top = sp->top();
1502
1503 if (dense_prefix_addr == top) {
1504 // Empty space
1505 continue;
1506 }
1507
2108
2109 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2110 ParallelCompactData& sd = summary_data();
2111 assert(sd.is_region_aligned(region_start_addr), "precondition");
2112
2113 // Use per-region partial_obj_size to locate the end of the obj, that extends
2114 // to region_start_addr.
2115 size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2116 size_t end_region_idx = sd.region_count();
2117 size_t accumulated_size = 0;
2118 for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2119 size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2120 accumulated_size += cur_partial_obj_size;
2121 if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2122 break;
2123 }
2124 }
2125 return region_start_addr + accumulated_size;
2126 }
2127
2128 // Use region_idx as the destination region, and evacuate all live objs on its
2129 // source regions to this destination region.
2130 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2131 {
2132 ParMarkBitMap* const bitmap = mark_bitmap();
2133 ParallelCompactData& sd = summary_data();
2134 RegionData* const region_ptr = sd.region(region_idx);
2135
2136 // Get the source region and related info.
2137 size_t src_region_idx = region_ptr->source_region();
2138 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2139 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2140 HeapWord* dest_addr = sd.region_to_addr(region_idx);
2141
2142 closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2143
2144 // Adjust src_region_idx to prepare for decrementing destination counts (the
2145 // destination count is not decremented when a region is copied to itself).
2146 if (src_region_idx == region_idx) {
2147 src_region_idx += 1;
2223 HeapWord* cur_addr = closure.source();
2224 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2225 src_space_top);
2226 // To handle the case where the final obj in source region extends to next region.
2227 HeapWord* final_obj_start = (end_addr == src_space_top)
2228 ? nullptr
2229 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2230 // Apply closure on objs inside [cur_addr, end_addr)
2231 do {
2232 cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2233 if (cur_addr == end_addr) {
2234 break;
2235 }
2236 size_t obj_size;
2237 if (final_obj_start == cur_addr) {
2238 obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2239 } else {
2240 // This obj doesn't extend into next region; size() is safe to use.
2241 obj_size = cast_to_oop(cur_addr)->size();
2242 }
2243 closure.do_addr(cur_addr, obj_size);
2244 cur_addr += obj_size;
2245 } while (cur_addr < end_addr && !closure.is_full());
2246
2247 if (closure.is_full()) {
2248 decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2249 closure.complete_region(dest_addr, region_ptr);
2250 return;
2251 }
2252
2253 decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2254
2255 // Move to the next source region, possibly switching spaces as well. All
2256 // args except end_addr may be modified.
2257 src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2258 } while (true);
2259 }
2260
2261 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2262 {
2263 MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2343 }
2344
2345 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2346 {
2347 size_t words = MIN2(partial_obj_size, words_remaining());
2348
2349 // This test is necessary; if omitted, the pointer updates to a partial object
2350 // that crosses the dense prefix boundary could be overwritten.
2351 if (source() != copy_destination()) {
2352 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2353 Copy::aligned_conjoint_words(source(), copy_destination(), words);
2354 }
2355 update_state(words);
2356 }
2357
// Mark a normally-compacted (non-shadow) destination region as finished.
void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}
2362
// Move the live object of 'words' words at 'addr' to its previously
// recorded forwarding destination and advance the closure's state.
// The copy may be truncated to the words remaining in this region; the
// caller continues the object in the next region.
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow: never copy more than fits in the destination region.
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    // The copied mark still holds the forwarding pointer installed during
    // the forward phase; reinstall a clean mark on the copy.
    cast_to_oop(copy_destination())->init_mark();
  }

  update_state(words);
}
2387
2388 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2389 assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2390 // Record the shadow region index
2391 region_ptr->set_shadow_region(_shadow);
2392 // Mark the shadow region as filled to indicate the data is ready to be
2393 // copied back
2394 region_ptr->mark_filled();
2395 // Try to copy the content of the shadow region back to its corresponding
2396 // heap region if available; the GC thread that decreases the destination
2397 // count to zero will do the copying otherwise (see
2398 // PSParallelCompact::decrement_destination_counts).
2399 if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2400 region_ptr->set_completed();
2401 PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2402 ParCompactionManager::push_shadow_region_mt_safe(_shadow);
|
60 #include "gc/shared/oopStorageSetParState.inline.hpp"
61 #include "gc/shared/parallelCleaning.hpp"
62 #include "gc/shared/preservedMarks.inline.hpp"
63 #include "gc/shared/referencePolicy.hpp"
64 #include "gc/shared/referenceProcessor.hpp"
65 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
66 #include "gc/shared/spaceDecorator.hpp"
67 #include "gc/shared/taskTerminator.hpp"
68 #include "gc/shared/weakProcessor.inline.hpp"
69 #include "gc/shared/workerPolicy.hpp"
70 #include "gc/shared/workerThread.hpp"
71 #include "gc/shared/workerUtils.hpp"
72 #include "logging/log.hpp"
73 #include "memory/iterator.inline.hpp"
74 #include "memory/memoryReserver.hpp"
75 #include "memory/metaspaceUtils.hpp"
76 #include "memory/resourceArea.hpp"
77 #include "memory/universe.hpp"
78 #include "nmt/memTracker.hpp"
79 #include "oops/access.inline.hpp"
80 #include "oops/flatArrayKlass.inline.hpp"
81 #include "oops/instanceClassLoaderKlass.inline.hpp"
82 #include "oops/instanceKlass.inline.hpp"
83 #include "oops/instanceMirrorKlass.inline.hpp"
84 #include "oops/methodData.hpp"
85 #include "oops/objArrayKlass.inline.hpp"
86 #include "oops/oop.inline.hpp"
87 #include "runtime/arguments.hpp"
88 #include "runtime/atomicAccess.hpp"
89 #include "runtime/handles.inline.hpp"
90 #include "runtime/java.hpp"
91 #include "runtime/safepoint.hpp"
92 #include "runtime/threads.hpp"
93 #include "runtime/vmThread.hpp"
94 #include "services/memoryService.hpp"
95 #include "utilities/align.hpp"
96 #include "utilities/debug.hpp"
97 #include "utilities/events.hpp"
98 #include "utilities/formatBuffer.hpp"
99 #include "utilities/macros.hpp"
100 #include "utilities/stack.inline.hpp"
101 #if INCLUDE_JVMCI
102 #include "jvmci/jvmci.hpp"
103 #endif
104
105 #include <math.h>
106
107 // All sizes are in HeapWords.
1443
// Split [start, end) evenly for a number of workers and return the
// range for worker_id via *worker_start / *worker_end (end exclusive).
// When the region count does not divide evenly, the first
// (num_regions % num_workers) workers each receive one extra region.
static void split_regions_for_worker(size_t start, size_t end,
                                     uint worker_id, uint num_workers,
                                     size_t* worker_start, size_t* worker_end) {
  assert(start < end, "precondition");
  assert(num_workers > 0, "precondition");
  assert(worker_id < num_workers, "precondition");

  size_t num_regions = end - start;
  size_t num_regions_per_worker = num_regions / num_workers;
  size_t remainder = num_regions % num_workers;
  // The first few workers will get one extra.
  *worker_start = start + worker_id * num_regions_per_worker
                + MIN2(checked_cast<size_t>(worker_id), remainder);
  *worker_end = *worker_start + num_regions_per_worker
              + (worker_id < remainder ? 1 : 0);
}
1462
1463 static bool safe_to_read_header(size_t words) {
1464 precond(words > 0);
1465
1466 // Safe to read if we have enough words for the full header, i.e., both
1467 // markWord and Klass pointer.
1468 const bool safe = words >= (size_t)oopDesc::header_size();
1469
1470 // If using Compact Object Headers, the full header is inside the markWord,
1471 // so will always be safe to read
1472 assert(!UseCompactObjectHeaders || safe, "Compact Object Headers should always be safe");
1473
1474 return safe;
1475 }
1476
1477 void PSParallelCompact::forward_to_new_addr() {
1478 GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1479 uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1480
1481 struct ForwardTask final : public WorkerTask {
1482 uint _num_workers;
1483
1484 explicit ForwardTask(uint num_workers) :
1485 WorkerTask("PSForward task"),
1486 _num_workers(num_workers) {}
1487
1488 static bool should_preserve_mark(oop obj, HeapWord* end_addr) {
1489 size_t remaining_words = pointer_delta(end_addr, cast_from_oop<HeapWord*>(obj));
1490
1491 if (Arguments::is_valhalla_enabled() && !safe_to_read_header(remaining_words)) {
1492 // When using Valhalla, it might be necessary to preserve the Valhalla-
1493 // specific bits in the markWord. If the entire object header is
1494 // copied, the correct markWord (with the appropriate Valhalla bits)
1495 // can be safely read from the Klass. However, if the full header is
1496 // not copied, we cannot safely read the Klass to obtain this information.
1497 // In such cases, we always preserve the markWord to ensure that all
1498 // relevant bits, including Valhalla-specific ones, are retained.
1499 return true;
1500 } else {
1501 return obj->mark().must_be_preserved();
1502 }
1503 }
1504
    // Walk the marked (live) objects in [start, end) and install forwarding
    // pointers so they will compact contiguously starting at 'destination'.
    // Nothing is copied here; only new addresses are recorded. An object
    // that would land at its current address is left untouched.
    static void forward_objs_in_range(ParCompactionManager* cm,
                                      HeapWord* start,
                                      HeapWord* end,
                                      HeapWord* destination) {
      HeapWord* cur_addr = start;
      HeapWord* new_addr = destination;

      while (cur_addr < end) {
        // Advance to the start of the next live object.
        cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
        if (cur_addr >= end) {
          return;
        }
        assert(mark_bitmap()->is_marked(cur_addr), "inv");
        oop obj = cast_to_oop(cur_addr);

        if (new_addr != cur_addr) {
          // The object moves: unconditionally save the mark when it cannot
          // be reconstructed later (see should_preserve_mark), then record
          // the destination.
          if (should_preserve_mark(obj, end)) {
            cm->preserved_marks()->push_always(obj, obj->mark());
          }

          FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
        }
        size_t obj_size = obj->size();
        new_addr += obj_size;
        cur_addr += obj_size;
      }
    }
1532
1533 void work(uint worker_id) override {
1534 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1535 for (uint id = old_space_id; id < last_space_id; ++id) {
1536 MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1537 HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1538 HeapWord* top = sp->top();
1539
1540 if (dense_prefix_addr == top) {
1541 // Empty space
1542 continue;
1543 }
1544
2145
// Return the address one past the end of the object that extends onto the
// region starting at region_start_addr (which must be region-aligned).
HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
  ParallelCompactData& sd = summary_data();
  assert(sd.is_region_aligned(region_start_addr), "precondition");

  // Use per-region partial_obj_size to locate the end of the obj, that extends
  // to region_start_addr.
  size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
  size_t end_region_idx = sd.region_count();
  size_t accumulated_size = 0;
  for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
    size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
    accumulated_size += cur_partial_obj_size;
    // A partial_obj_size of exactly RegionSize means the object covers this
    // whole region and continues into the next; anything smaller means the
    // object ends within this region.
    if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
      break;
    }
  }
  return region_start_addr + accumulated_size;
}
2164
// Compute the markWord to install on the copy of the object starting at
// cur_addr. end_addr bounds how many words of the object are available to
// read at this point.
static markWord safe_mark_word_prototype(HeapWord* cur_addr, HeapWord* end_addr) {
  // If the original markWord contains bits that cannot be reconstructed because
  // the header cannot be safely read, a placeholder is used. In this case,
  // the correct markWord is preserved before compaction and restored after
  // compaction completes.
  size_t remaining_words = pointer_delta(end_addr, cur_addr);

  if (UseCompactObjectHeaders || (Arguments::is_valhalla_enabled() && safe_to_read_header(remaining_words))) {
    // The Klass is readable here, so use its prototype header, which
    // presumably carries the klass-dependent mark bits -- verify against
    // FullGCForwarding's compact-header encoding.
    return cast_to_oop(cur_addr)->klass()->prototype_header();
  } else {
    // Placeholder; the real mark was preserved earlier and is restored
    // after compaction.
    return markWord::prototype();
  }
}
2178
2179 // Use region_idx as the destination region, and evacuate all live objs on its
2180 // source regions to this destination region.
2181 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2182 {
2183 ParMarkBitMap* const bitmap = mark_bitmap();
2184 ParallelCompactData& sd = summary_data();
2185 RegionData* const region_ptr = sd.region(region_idx);
2186
2187 // Get the source region and related info.
2188 size_t src_region_idx = region_ptr->source_region();
2189 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2190 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2191 HeapWord* dest_addr = sd.region_to_addr(region_idx);
2192
2193 closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2194
2195 // Adjust src_region_idx to prepare for decrementing destination counts (the
2196 // destination count is not decremented when a region is copied to itself).
2197 if (src_region_idx == region_idx) {
2198 src_region_idx += 1;
2274 HeapWord* cur_addr = closure.source();
2275 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2276 src_space_top);
2277 // To handle the case where the final obj in source region extends to next region.
2278 HeapWord* final_obj_start = (end_addr == src_space_top)
2279 ? nullptr
2280 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2281 // Apply closure on objs inside [cur_addr, end_addr)
2282 do {
2283 cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2284 if (cur_addr == end_addr) {
2285 break;
2286 }
2287 size_t obj_size;
2288 if (final_obj_start == cur_addr) {
2289 obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2290 } else {
2291 // This obj doesn't extend into next region; size() is safe to use.
2292 obj_size = cast_to_oop(cur_addr)->size();
2293 }
2294
2295 markWord mark = safe_mark_word_prototype(cur_addr, end_addr);
2296
2297 // Perform the move and update of the object
2298 closure.do_addr(cur_addr, obj_size, mark);
2299
2300 cur_addr += obj_size;
2301 } while (cur_addr < end_addr && !closure.is_full());
2302
2303 if (closure.is_full()) {
2304 decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2305 closure.complete_region(dest_addr, region_ptr);
2306 return;
2307 }
2308
2309 decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2310
2311 // Move to the next source region, possibly switching spaces as well. All
2312 // args except end_addr may be modified.
2313 src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2314 } while (true);
2315 }
2316
2317 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2318 {
2319 MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2399 }
2400
// Copy (a prefix of) a partial object into this closure's destination,
// clamped to the number of words remaining in the destination region.
void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
{
  size_t words = MIN2(partial_obj_size, words_remaining());

  // This test is necessary; if omitted, the pointer updates to a partial object
  // that crosses the dense prefix boundary could be overwritten.
  if (source() != copy_destination()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
  }
  update_state(words);
}
2413
// Mark a normally-compacted (non-shadow) destination region as finished.
void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}
2418
// Move the live object of 'words' words at 'addr' to its previously
// recorded forwarding destination and advance the closure's state.
// 'mark' is the markWord to install on the copy, computed by the caller
// (either a klass prototype or a plain placeholder). The copy may be
// truncated to the words remaining in this region; the caller continues
// the object in the next region.
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words, markWord mark) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    // The copied mark still holds the forwarding pointer; replace it with
    // the caller-supplied mark.
    cast_to_oop(copy_destination())->set_mark(mark);
  }

  update_state(words);
}
2443
2444 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2445 assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2446 // Record the shadow region index
2447 region_ptr->set_shadow_region(_shadow);
2448 // Mark the shadow region as filled to indicate the data is ready to be
2449 // copied back
2450 region_ptr->mark_filled();
2451 // Try to copy the content of the shadow region back to its corresponding
2452 // heap region if available; the GC thread that decreases the destination
2453 // count to zero will do the copying otherwise (see
2454 // PSParallelCompact::decrement_destination_counts).
2455 if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2456 region_ptr->set_completed();
2457 PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2458 ParCompactionManager::push_shadow_region_mt_safe(_shadow);
|