61 #include "gc/shared/oopStorageSetParState.inline.hpp"
62 #include "gc/shared/parallelCleaning.hpp"
63 #include "gc/shared/preservedMarks.inline.hpp"
64 #include "gc/shared/referencePolicy.hpp"
65 #include "gc/shared/referenceProcessor.hpp"
66 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
67 #include "gc/shared/spaceDecorator.hpp"
68 #include "gc/shared/taskTerminator.hpp"
69 #include "gc/shared/weakProcessor.inline.hpp"
70 #include "gc/shared/workerPolicy.hpp"
71 #include "gc/shared/workerThread.hpp"
72 #include "gc/shared/workerUtils.hpp"
73 #include "logging/log.hpp"
74 #include "memory/iterator.inline.hpp"
75 #include "memory/memoryReserver.hpp"
76 #include "memory/metaspaceUtils.hpp"
77 #include "memory/resourceArea.hpp"
78 #include "memory/universe.hpp"
79 #include "nmt/memTracker.hpp"
80 #include "oops/access.inline.hpp"
81 #include "oops/instanceClassLoaderKlass.inline.hpp"
82 #include "oops/instanceKlass.inline.hpp"
83 #include "oops/instanceMirrorKlass.inline.hpp"
84 #include "oops/methodData.hpp"
85 #include "oops/objArrayKlass.inline.hpp"
86 #include "oops/oop.inline.hpp"
87 #include "runtime/atomicAccess.hpp"
88 #include "runtime/handles.inline.hpp"
89 #include "runtime/java.hpp"
90 #include "runtime/safepoint.hpp"
91 #include "runtime/threads.hpp"
92 #include "runtime/vmThread.hpp"
93 #include "services/memoryService.hpp"
94 #include "utilities/align.hpp"
95 #include "utilities/debug.hpp"
96 #include "utilities/events.hpp"
97 #include "utilities/formatBuffer.hpp"
98 #include "utilities/macros.hpp"
99 #include "utilities/stack.inline.hpp"
100 #if INCLUDE_JVMCI
101 #include "jvmci/jvmci.hpp"
102 #endif
103
104 #include <math.h>
105
106 // All sizes are in HeapWords.
1447
1448 // Split [start, end) evenly for a number of workers and return the
1449 // range for worker_id.
1450 static void split_regions_for_worker(size_t start, size_t end,
1451 uint worker_id, uint num_workers,
1452 size_t* worker_start, size_t* worker_end) {
1453 assert(start < end, "precondition");
1454 assert(num_workers > 0, "precondition");
1455 assert(worker_id < num_workers, "precondition");
1456
1457 size_t num_regions = end - start;
1458 size_t num_regions_per_worker = num_regions / num_workers;
1459 size_t remainder = num_regions % num_workers;
1460 // The first few workers will get one extra.
1461 *worker_start = start + worker_id * num_regions_per_worker
1462 + MIN2(checked_cast<size_t>(worker_id), remainder);
1463 *worker_end = *worker_start + num_regions_per_worker
1464 + (worker_id < remainder ? 1 : 0);
1465 }
1466
// Install a forwarding pointer (the compacted address) in every live object
// that will move, in parallel across GC workers.
void PSParallelCompact::forward_to_new_addr() {
  GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
  uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();

  struct ForwardTask final : public WorkerTask {
    uint _num_workers;

    explicit ForwardTask(uint num_workers) :
      WorkerTask("PSForward task"),
      _num_workers(num_workers) {}

    // Walk live objects (per the mark bitmap) in [start, end) and forward
    // each to consecutive addresses starting at 'destination'. Marks are
    // preserved (if necessary) before forwarding overwrites the markWord.
    static void forward_objs_in_range(ParCompactionManager* cm,
                                      HeapWord* start,
                                      HeapWord* end,
                                      HeapWord* destination) {
      HeapWord* cur_addr = start;
      HeapWord* new_addr = destination;

      while (cur_addr < end) {
        cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
        if (cur_addr >= end) {
          // No more live objects in this range.
          return;
        }
        assert(mark_bitmap()->is_marked(cur_addr), "inv");
        oop obj = cast_to_oop(cur_addr);
        if (new_addr != cur_addr) {
          // Object moves: save its markWord if required, then store the
          // forwarding pointer.
          cm->preserved_marks()->push_if_necessary(obj, obj->mark());
          FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
        }
        size_t obj_size = obj->size();
        new_addr += obj_size;
        cur_addr += obj_size;
      }
    }

    void work(uint worker_id) override {
      ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
      for (uint id = old_space_id; id < last_space_id; ++id) {
        MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
        HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
        HeapWord* top = sp->top();

        // A space whose dense prefix reaches top has nothing to forward.
        if (dense_prefix_addr == top) {
          // Empty space
          continue;
        }

2114
2115 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2116 ParallelCompactData& sd = summary_data();
2117 assert(sd.is_region_aligned(region_start_addr), "precondition");
2118
2119 // Use per-region partial_obj_size to locate the end of the obj, that extends
2120 // to region_start_addr.
2121 size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2122 size_t end_region_idx = sd.region_count();
2123 size_t accumulated_size = 0;
2124 for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2125 size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2126 accumulated_size += cur_partial_obj_size;
2127 if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2128 break;
2129 }
2130 }
2131 return region_start_addr + accumulated_size;
2132 }
2133
2134 // Use region_idx as the destination region, and evacuate all live objs on its
2135 // source regions to this destination region.
void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
{
  ParMarkBitMap* const bitmap = mark_bitmap();
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);

  // Get the source region and related info.
  size_t src_region_idx = region_ptr->source_region();
  SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
  HeapWord* src_space_top = _space_info[src_space_id].space()->top();
  HeapWord* dest_addr = sd.region_to_addr(region_idx);

  // Position the closure at the first source word for this destination
  // (presumably computed by first_src_addr from the summary data — see its
  // definition for the exact contract).
  closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));

  // Adjust src_region_idx to prepare for decrementing destination counts (the
  // destination count is not decremented when a region is copied to itself).
  if (src_region_idx == region_idx) {
    src_region_idx += 1;
    HeapWord* cur_addr = closure.source();
    HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
                                    src_space_top);
    // To handle the case where the final obj in source region extends to next region.
    HeapWord* final_obj_start = (end_addr == src_space_top)
                                ? nullptr
                                : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
    // Apply closure on objs inside [cur_addr, end_addr)
    do {
      cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
      if (cur_addr == end_addr) {
        // No live objects left in this source region.
        break;
      }
      size_t obj_size;
      if (final_obj_start == cur_addr) {
        // Object straddles the region boundary; its size comes from the
        // per-region partial_obj_size data, not the object header.
        obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
      } else {
        // This obj doesn't extend into next region; size() is safe to use.
        obj_size = cast_to_oop(cur_addr)->size();
      }
      closure.do_addr(cur_addr, obj_size);
      cur_addr += obj_size;
    } while (cur_addr < end_addr && !closure.is_full());

    if (closure.is_full()) {
      // The destination region is full; mark it completed and stop.
      decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
      closure.complete_region(dest_addr, region_ptr);
      return;
    }

    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

    // Move to the next source region, possibly switching spaces as well. All
    // args except end_addr may be modified.
    src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
  } while (true);
}
2266
// Fill destination region region_idx using a normal (non-shadow)
// MoveAndUpdateClosure.
void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
{
  MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2349 }
2350
2351 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2352 {
2353 size_t words = MIN2(partial_obj_size, words_remaining());
2354
2355 // This test is necessary; if omitted, the pointer updates to a partial object
2356 // that crosses the dense prefix boundary could be overwritten.
2357 if (source() != copy_destination()) {
2358 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2359 Copy::aligned_conjoint_words(source(), copy_destination(), words);
2360 }
2361 update_state(words);
2362 }
2363
// Mark a normal (non-shadow) destination region as fully filled. dest_addr
// is unused here; it is needed by the shadow-region override.
void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}
2368
// Copy the live object at addr to the closure's current destination and
// advance the closure state. 'words' may exceed the destination's remaining
// capacity; the copy is then truncated to what fits (callers detect this via
// is_full()).
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  // Note: this uses the uncapped 'words', so the block table covers the whole
  // object even when the copy below is truncated.
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    // The copy carried the forwarded markWord along; reset it to a plain one.
    cast_to_oop(copy_destination())->init_mark();
  }

  update_state(words);
}
2393
// Shadow-region variant: record and publish the filled shadow region, then
// copy its contents back to the heap region if that region is available.
void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
  // Record the shadow region index
  region_ptr->set_shadow_region(_shadow);
  // Mark the shadow region as filled to indicate the data is ready to be
  // copied back
  region_ptr->mark_filled();
  // Try to copy the content of the shadow region back to its corresponding
  // heap region if available; the GC thread that decreases the destination
  // count to zero will do the copying otherwise (see
  // PSParallelCompact::decrement_destination_counts).
  if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
    region_ptr->set_completed();
    PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
    ParCompactionManager::push_shadow_region_mt_safe(_shadow);
|
61 #include "gc/shared/oopStorageSetParState.inline.hpp"
62 #include "gc/shared/parallelCleaning.hpp"
63 #include "gc/shared/preservedMarks.inline.hpp"
64 #include "gc/shared/referencePolicy.hpp"
65 #include "gc/shared/referenceProcessor.hpp"
66 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
67 #include "gc/shared/spaceDecorator.hpp"
68 #include "gc/shared/taskTerminator.hpp"
69 #include "gc/shared/weakProcessor.inline.hpp"
70 #include "gc/shared/workerPolicy.hpp"
71 #include "gc/shared/workerThread.hpp"
72 #include "gc/shared/workerUtils.hpp"
73 #include "logging/log.hpp"
74 #include "memory/iterator.inline.hpp"
75 #include "memory/memoryReserver.hpp"
76 #include "memory/metaspaceUtils.hpp"
77 #include "memory/resourceArea.hpp"
78 #include "memory/universe.hpp"
79 #include "nmt/memTracker.hpp"
80 #include "oops/access.inline.hpp"
81 #include "oops/flatArrayKlass.inline.hpp"
82 #include "oops/instanceClassLoaderKlass.inline.hpp"
83 #include "oops/instanceKlass.inline.hpp"
84 #include "oops/instanceMirrorKlass.inline.hpp"
85 #include "oops/methodData.hpp"
86 #include "oops/objArrayKlass.inline.hpp"
87 #include "oops/oop.inline.hpp"
88 #include "runtime/arguments.hpp"
89 #include "runtime/atomicAccess.hpp"
90 #include "runtime/handles.inline.hpp"
91 #include "runtime/java.hpp"
92 #include "runtime/safepoint.hpp"
93 #include "runtime/threads.hpp"
94 #include "runtime/vmThread.hpp"
95 #include "services/memoryService.hpp"
96 #include "utilities/align.hpp"
97 #include "utilities/debug.hpp"
98 #include "utilities/events.hpp"
99 #include "utilities/formatBuffer.hpp"
100 #include "utilities/macros.hpp"
101 #include "utilities/stack.inline.hpp"
102 #if INCLUDE_JVMCI
103 #include "jvmci/jvmci.hpp"
104 #endif
105
106 #include <math.h>
107
108 // All sizes are in HeapWords.
1449
1450 // Split [start, end) evenly for a number of workers and return the
1451 // range for worker_id.
1452 static void split_regions_for_worker(size_t start, size_t end,
1453 uint worker_id, uint num_workers,
1454 size_t* worker_start, size_t* worker_end) {
1455 assert(start < end, "precondition");
1456 assert(num_workers > 0, "precondition");
1457 assert(worker_id < num_workers, "precondition");
1458
1459 size_t num_regions = end - start;
1460 size_t num_regions_per_worker = num_regions / num_workers;
1461 size_t remainder = num_regions % num_workers;
1462 // The first few workers will get one extra.
1463 *worker_start = start + worker_id * num_regions_per_worker
1464 + MIN2(checked_cast<size_t>(worker_id), remainder);
1465 *worker_end = *worker_start + num_regions_per_worker
1466 + (worker_id < remainder ? 1 : 0);
1467 }
1468
1469 static bool safe_to_read_header(size_t words) {
1470 precond(words > 0);
1471
1472 // Safe to read if we have enough words for the full header, i.e., both
1473 // markWord and Klass pointer.
1474 const bool safe = words >= (size_t)oopDesc::header_size();
1475
1476 // If using Compact Object Headers, the full header is inside the markWord,
1477 // so will always be safe to read
1478 assert(!UseCompactObjectHeaders || safe, "Compact Object Headers should always be safe");
1479
1480 return safe;
1481 }
1482
// Install a forwarding pointer (the compacted address) in every live object
// that will move, in parallel across GC workers.
void PSParallelCompact::forward_to_new_addr() {
  GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
  uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();

  struct ForwardTask final : public WorkerTask {
    uint _num_workers;

    explicit ForwardTask(uint num_workers) :
      WorkerTask("PSForward task"),
      _num_workers(num_workers) {}

    // Decide whether obj's markWord must be saved before forwarding
    // overwrites it. end_addr bounds the readable words of obj within the
    // current range.
    static bool should_preserve_mark(oop obj, HeapWord* end_addr) {
      size_t remaining_words = pointer_delta(end_addr, cast_from_oop<HeapWord*>(obj));

      if (Arguments::is_valhalla_enabled() && !safe_to_read_header(remaining_words)) {
        // When using Valhalla, it might be necessary to preserve the Valhalla-
        // specific bits in the markWord. If the entire object header is
        // copied, the correct markWord (with the appropriate Valhalla bits)
        // can be safely read from the Klass. However, if the full header is
        // not copied, we cannot safely read the Klass to obtain this information.
        // In such cases, we always preserve the markWord to ensure that all
        // relevant bits, including Valhalla-specific ones, are retained.
        return true;
      } else {
        return obj->mark().must_be_preserved();
      }
    }

    // Walk live objects (per the mark bitmap) in [start, end) and forward
    // each to consecutive addresses starting at 'destination'. Marks are
    // preserved first when should_preserve_mark() says so.
    static void forward_objs_in_range(ParCompactionManager* cm,
                                      HeapWord* start,
                                      HeapWord* end,
                                      HeapWord* destination) {
      HeapWord* cur_addr = start;
      HeapWord* new_addr = destination;

      while (cur_addr < end) {
        cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
        if (cur_addr >= end) {
          // No more live objects in this range.
          return;
        }
        assert(mark_bitmap()->is_marked(cur_addr), "inv");
        oop obj = cast_to_oop(cur_addr);

        if (new_addr != cur_addr) {
          if (should_preserve_mark(obj, end)) {
            cm->preserved_marks()->push_always(obj, obj->mark());
          }

          FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
        }
        size_t obj_size = obj->size();
        new_addr += obj_size;
        cur_addr += obj_size;
      }
    }

    void work(uint worker_id) override {
      ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
      for (uint id = old_space_id; id < last_space_id; ++id) {
        MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
        HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
        HeapWord* top = sp->top();

        // A space whose dense prefix reaches top has nothing to forward.
        if (dense_prefix_addr == top) {
          // Empty space
          continue;
        }

2151
2152 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2153 ParallelCompactData& sd = summary_data();
2154 assert(sd.is_region_aligned(region_start_addr), "precondition");
2155
2156 // Use per-region partial_obj_size to locate the end of the obj, that extends
2157 // to region_start_addr.
2158 size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2159 size_t end_region_idx = sd.region_count();
2160 size_t accumulated_size = 0;
2161 for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2162 size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2163 accumulated_size += cur_partial_obj_size;
2164 if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2165 break;
2166 }
2167 }
2168 return region_start_addr + accumulated_size;
2169 }
2170
2171 static markWord safe_mark_word_prototype(HeapWord* cur_addr, HeapWord* end_addr) {
2172 // If the original markWord contains bits that cannot be reconstructed because
2173 // the header cannot be safely read, a placeholder is used. In this case,
2174 // the correct markWord is preserved before compaction and restored after
2175 // compaction completes.
2176 size_t remaining_words = pointer_delta(end_addr, cur_addr);
2177
2178 if (UseCompactObjectHeaders || (Arguments::is_valhalla_enabled() && safe_to_read_header(remaining_words))) {
2179 return cast_to_oop(cur_addr)->klass()->prototype_header();
2180 } else {
2181 return markWord::prototype();
2182 }
2183 }
2184
2185 // Use region_idx as the destination region, and evacuate all live objs on its
2186 // source regions to this destination region.
void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
{
  ParMarkBitMap* const bitmap = mark_bitmap();
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);

  // Get the source region and related info.
  size_t src_region_idx = region_ptr->source_region();
  SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
  HeapWord* src_space_top = _space_info[src_space_id].space()->top();
  HeapWord* dest_addr = sd.region_to_addr(region_idx);

  // Position the closure at the first source word for this destination
  // (presumably computed by first_src_addr from the summary data — see its
  // definition for the exact contract).
  closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));

  // Adjust src_region_idx to prepare for decrementing destination counts (the
  // destination count is not decremented when a region is copied to itself).
  if (src_region_idx == region_idx) {
    src_region_idx += 1;
    HeapWord* cur_addr = closure.source();
    HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
                                    src_space_top);
    // To handle the case where the final obj in source region extends to next region.
    HeapWord* final_obj_start = (end_addr == src_space_top)
                                ? nullptr
                                : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
    // Apply closure on objs inside [cur_addr, end_addr)
    do {
      cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
      if (cur_addr == end_addr) {
        // No live objects left in this source region.
        break;
      }
      size_t obj_size;
      if (final_obj_start == cur_addr) {
        // Object straddles the region boundary; its size comes from the
        // per-region partial_obj_size data, not the object header.
        obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
      } else {
        // This obj doesn't extend into next region; size() is safe to use.
        obj_size = cast_to_oop(cur_addr)->size();
      }

      // Pick the markWord for the copied object; a placeholder is used when
      // the header cannot be read safely (see safe_mark_word_prototype).
      markWord mark = safe_mark_word_prototype(cur_addr, end_addr);

      // Perform the move and update of the object
      closure.do_addr(cur_addr, obj_size, mark);

      cur_addr += obj_size;
    } while (cur_addr < end_addr && !closure.is_full());

    if (closure.is_full()) {
      // The destination region is full; mark it completed and stop.
      decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
      closure.complete_region(dest_addr, region_ptr);
      return;
    }

    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

    // Move to the next source region, possibly switching spaces as well. All
    // args except end_addr may be modified.
    src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
  } while (true);
}
2322
// Fill destination region region_idx using a normal (non-shadow)
// MoveAndUpdateClosure.
void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
{
  MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2405 }
2406
2407 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2408 {
2409 size_t words = MIN2(partial_obj_size, words_remaining());
2410
2411 // This test is necessary; if omitted, the pointer updates to a partial object
2412 // that crosses the dense prefix boundary could be overwritten.
2413 if (source() != copy_destination()) {
2414 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2415 Copy::aligned_conjoint_words(source(), copy_destination(), words);
2416 }
2417 update_state(words);
2418 }
2419
// Mark a normal (non-shadow) destination region as fully filled. dest_addr
// is unused here; it is needed by the shadow-region override.
void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}
2424
// Copy the live object at addr to the closure's current destination, install
// the caller-provided 'mark' as its markWord (computed via
// safe_mark_word_prototype at the call site), and advance the closure state.
// 'words' may exceed the destination's remaining capacity; the copy is then
// truncated to what fits (callers detect this via is_full()).
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words, markWord mark) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  // Note: this uses the uncapped 'words', so the block table covers the whole
  // object even when the copy below is truncated.
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    // The copy carried the forwarded markWord along; install the prototype.
    cast_to_oop(copy_destination())->set_mark(mark);
  }

  update_state(words);
}
2449
// Shadow-region variant: record and publish the filled shadow region, then
// copy its contents back to the heap region if that region is available.
void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
  // Record the shadow region index
  region_ptr->set_shadow_region(_shadow);
  // Mark the shadow region as filled to indicate the data is ready to be
  // copied back
  region_ptr->mark_filled();
  // Try to copy the content of the shadow region back to its corresponding
  // heap region if available; the GC thread that decreases the destination
  // count to zero will do the copying otherwise (see
  // PSParallelCompact::decrement_destination_counts).
  if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
    region_ptr->set_completed();
    PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
    ParCompactionManager::push_shadow_region_mt_safe(_shadow);
|