61 #include "gc/shared/oopStorageSetParState.inline.hpp"
62 #include "gc/shared/parallelCleaning.hpp"
63 #include "gc/shared/preservedMarks.inline.hpp"
64 #include "gc/shared/referencePolicy.hpp"
65 #include "gc/shared/referenceProcessor.hpp"
66 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
67 #include "gc/shared/spaceDecorator.hpp"
68 #include "gc/shared/taskTerminator.hpp"
69 #include "gc/shared/weakProcessor.inline.hpp"
70 #include "gc/shared/workerPolicy.hpp"
71 #include "gc/shared/workerThread.hpp"
72 #include "gc/shared/workerUtils.hpp"
73 #include "logging/log.hpp"
74 #include "memory/iterator.inline.hpp"
75 #include "memory/memoryReserver.hpp"
76 #include "memory/metaspaceUtils.hpp"
77 #include "memory/resourceArea.hpp"
78 #include "memory/universe.hpp"
79 #include "nmt/memTracker.hpp"
80 #include "oops/access.inline.hpp"
81 #include "oops/instanceClassLoaderKlass.inline.hpp"
82 #include "oops/instanceKlass.inline.hpp"
83 #include "oops/instanceMirrorKlass.inline.hpp"
84 #include "oops/methodData.hpp"
85 #include "oops/objArrayKlass.inline.hpp"
86 #include "oops/oop.inline.hpp"
87 #include "runtime/handles.inline.hpp"
88 #include "runtime/java.hpp"
89 #include "runtime/safepoint.hpp"
90 #include "runtime/threads.hpp"
91 #include "runtime/vmThread.hpp"
92 #include "services/memoryService.hpp"
93 #include "utilities/align.hpp"
94 #include "utilities/debug.hpp"
95 #include "utilities/events.hpp"
96 #include "utilities/formatBuffer.hpp"
97 #include "utilities/macros.hpp"
98 #include "utilities/stack.inline.hpp"
99 #if INCLUDE_JVMCI
100 #include "jvmci/jvmci.hpp"
101 #endif
102
103 #include <math.h>
104
105 // All sizes are in HeapWords.
106 const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words
1446
1447 // Split [start, end) evenly for a number of workers and return the
1448 // range for worker_id.
1449 static void split_regions_for_worker(size_t start, size_t end,
1450 uint worker_id, uint num_workers,
1451 size_t* worker_start, size_t* worker_end) {
1452 assert(start < end, "precondition");
1453 assert(num_workers > 0, "precondition");
1454 assert(worker_id < num_workers, "precondition");
1455
1456 size_t num_regions = end - start;
1457 size_t num_regions_per_worker = num_regions / num_workers;
1458 size_t remainder = num_regions % num_workers;
1459 // The first few workers will get one extra.
1460 *worker_start = start + worker_id * num_regions_per_worker
1461 + MIN2(checked_cast<size_t>(worker_id), remainder);
1462 *worker_end = *worker_start + num_regions_per_worker
1463 + (worker_id < remainder ? 1 : 0);
1464 }
1465
1466 void PSParallelCompact::forward_to_new_addr() {
1467 GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1468 uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1469
1470 struct ForwardTask final : public WorkerTask {
1471 uint _num_workers;
1472
1473 explicit ForwardTask(uint num_workers) :
1474 WorkerTask("PSForward task"),
1475 _num_workers(num_workers) {}
1476
    // Forward (compute and install new addresses for) all marked objects in
    // [start, end), assigning them consecutive addresses starting at
    // destination. Objects whose new address equals their current one are
    // left untouched and need no preserved mark.
    static void forward_objs_in_range(ParCompactionManager* cm,
                                      HeapWord* start,
                                      HeapWord* end,
                                      HeapWord* destination) {
      HeapWord* cur_addr = start;
      HeapWord* new_addr = destination;

      while (cur_addr < end) {
        // Advance to the next live (marked) object, if any remain.
        cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
        if (cur_addr >= end) {
          return;
        }
        assert(mark_bitmap()->is_marked(cur_addr), "inv");
        oop obj = cast_to_oop(cur_addr);
        if (new_addr != cur_addr) {
          // The object will move: save the mark word first if it must survive
          // (forwarding can clobber mark bits), then install the forwardee.
          cm->preserved_marks()->push_if_necessary(obj, obj->mark());
          FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
        }
        size_t obj_size = obj->size();
        new_addr += obj_size;
        cur_addr += obj_size;
      }
    }
1500
1501 void work(uint worker_id) override {
1502 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1503 for (uint id = old_space_id; id < last_space_id; ++id) {
1504 MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1505 HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1506 HeapWord* top = sp->top();
1507
1508 if (dense_prefix_addr == top) {
1509 // Empty space
1510 continue;
1511 }
1512
2113
2114 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2115 ParallelCompactData& sd = summary_data();
2116 assert(sd.is_region_aligned(region_start_addr), "precondition");
2117
2118 // Use per-region partial_obj_size to locate the end of the obj, that extends
2119 // to region_start_addr.
2120 size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2121 size_t end_region_idx = sd.region_count();
2122 size_t accumulated_size = 0;
2123 for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2124 size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2125 accumulated_size += cur_partial_obj_size;
2126 if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2127 break;
2128 }
2129 }
2130 return region_start_addr + accumulated_size;
2131 }
2132
2133 // Use region_idx as the destination region, and evacuate all live objs on its
2134 // source regions to this destination region.
2135 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2136 {
2137 ParMarkBitMap* const bitmap = mark_bitmap();
2138 ParallelCompactData& sd = summary_data();
2139 RegionData* const region_ptr = sd.region(region_idx);
2140
2141 // Get the source region and related info.
2142 size_t src_region_idx = region_ptr->source_region();
2143 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2144 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2145 HeapWord* dest_addr = sd.region_to_addr(region_idx);
2146
2147 closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2148
2149 // Adjust src_region_idx to prepare for decrementing destination counts (the
2150 // destination count is not decremented when a region is copied to itself).
2151 if (src_region_idx == region_idx) {
2152 src_region_idx += 1;
2228 HeapWord* cur_addr = closure.source();
2229 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2230 src_space_top);
2231 // To handle the case where the final obj in source region extends to next region.
2232 HeapWord* final_obj_start = (end_addr == src_space_top)
2233 ? nullptr
2234 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2235 // Apply closure on objs inside [cur_addr, end_addr)
2236 do {
2237 cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2238 if (cur_addr == end_addr) {
2239 break;
2240 }
2241 size_t obj_size;
2242 if (final_obj_start == cur_addr) {
2243 obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2244 } else {
2245 // This obj doesn't extend into next region; size() is safe to use.
2246 obj_size = cast_to_oop(cur_addr)->size();
2247 }
2248 closure.do_addr(cur_addr, obj_size);
2249 cur_addr += obj_size;
2250 } while (cur_addr < end_addr && !closure.is_full());
2251
2252 if (closure.is_full()) {
2253 decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2254 closure.complete_region(dest_addr, region_ptr);
2255 return;
2256 }
2257
2258 decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2259
2260 // Move to the next source region, possibly switching spaces as well. All
2261 // args except end_addr may be modified.
2262 src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2263 } while (true);
2264 }
2265
2266 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2267 {
2268 MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2348 }
2349
2350 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2351 {
2352 size_t words = MIN2(partial_obj_size, words_remaining());
2353
2354 // This test is necessary; if omitted, the pointer updates to a partial object
2355 // that crosses the dense prefix boundary could be overwritten.
2356 if (source() != copy_destination()) {
2357 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2358 Copy::aligned_conjoint_words(source(), copy_destination(), words);
2359 }
2360 update_state(words);
2361 }
2362
// A normal (non-shadow) region is filled in place; simply mark it completed.
void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}
2367
// Move the object at addr (words in size) to this closure's destination,
// update the start array, and advance the closure state. addr becomes the
// new source.
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  // NOTE(review): this uses `words` before the clamp below — presumably so
  // the block record spans the whole object; confirm before changing.
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    // The copied header still carries the forwarding information (asserted
    // above on the source); re-initialize the mark word of the moved copy.
    cast_to_oop(copy_destination())->init_mark();
  }

  update_state(words);
}
2392
2393 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2394 assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2395 // Record the shadow region index
2396 region_ptr->set_shadow_region(_shadow);
2397 // Mark the shadow region as filled to indicate the data is ready to be
2398 // copied back
2399 region_ptr->mark_filled();
2400 // Try to copy the content of the shadow region back to its corresponding
2401 // heap region if available; the GC thread that decreases the destination
2402 // count to zero will do the copying otherwise (see
2403 // PSParallelCompact::decrement_destination_counts).
2404 if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2405 region_ptr->set_completed();
2406 PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2407 ParCompactionManager::push_shadow_region_mt_safe(_shadow);
|
61 #include "gc/shared/oopStorageSetParState.inline.hpp"
62 #include "gc/shared/parallelCleaning.hpp"
63 #include "gc/shared/preservedMarks.inline.hpp"
64 #include "gc/shared/referencePolicy.hpp"
65 #include "gc/shared/referenceProcessor.hpp"
66 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
67 #include "gc/shared/spaceDecorator.hpp"
68 #include "gc/shared/taskTerminator.hpp"
69 #include "gc/shared/weakProcessor.inline.hpp"
70 #include "gc/shared/workerPolicy.hpp"
71 #include "gc/shared/workerThread.hpp"
72 #include "gc/shared/workerUtils.hpp"
73 #include "logging/log.hpp"
74 #include "memory/iterator.inline.hpp"
75 #include "memory/memoryReserver.hpp"
76 #include "memory/metaspaceUtils.hpp"
77 #include "memory/resourceArea.hpp"
78 #include "memory/universe.hpp"
79 #include "nmt/memTracker.hpp"
80 #include "oops/access.inline.hpp"
81 #include "oops/flatArrayKlass.inline.hpp"
82 #include "oops/instanceClassLoaderKlass.inline.hpp"
83 #include "oops/instanceKlass.inline.hpp"
84 #include "oops/instanceMirrorKlass.inline.hpp"
85 #include "oops/methodData.hpp"
86 #include "oops/objArrayKlass.inline.hpp"
87 #include "oops/oop.inline.hpp"
88 #include "runtime/arguments.hpp"
89 #include "runtime/handles.inline.hpp"
90 #include "runtime/java.hpp"
91 #include "runtime/safepoint.hpp"
92 #include "runtime/threads.hpp"
93 #include "runtime/vmThread.hpp"
94 #include "services/memoryService.hpp"
95 #include "utilities/align.hpp"
96 #include "utilities/debug.hpp"
97 #include "utilities/events.hpp"
98 #include "utilities/formatBuffer.hpp"
99 #include "utilities/macros.hpp"
100 #include "utilities/stack.inline.hpp"
101 #if INCLUDE_JVMCI
102 #include "jvmci/jvmci.hpp"
103 #endif
104
105 #include <math.h>
106
107 // All sizes are in HeapWords.
108 const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words
1448
1449 // Split [start, end) evenly for a number of workers and return the
1450 // range for worker_id.
1451 static void split_regions_for_worker(size_t start, size_t end,
1452 uint worker_id, uint num_workers,
1453 size_t* worker_start, size_t* worker_end) {
1454 assert(start < end, "precondition");
1455 assert(num_workers > 0, "precondition");
1456 assert(worker_id < num_workers, "precondition");
1457
1458 size_t num_regions = end - start;
1459 size_t num_regions_per_worker = num_regions / num_workers;
1460 size_t remainder = num_regions % num_workers;
1461 // The first few workers will get one extra.
1462 *worker_start = start + worker_id * num_regions_per_worker
1463 + MIN2(checked_cast<size_t>(worker_id), remainder);
1464 *worker_end = *worker_start + num_regions_per_worker
1465 + (worker_id < remainder ? 1 : 0);
1466 }
1467
1468 static bool safe_to_read_header(size_t words) {
1469 precond(words > 0);
1470
1471 // Safe to read if we have enough words for the full header, i.e., both
1472 // markWord and Klass pointer.
1473 const bool safe = words >= (size_t)oopDesc::header_size();
1474
1475 // If using Compact Object Headers, the full header is inside the markWord,
1476 // so will always be safe to read
1477 assert(!UseCompactObjectHeaders || safe, "Compact Object Headers should always be safe");
1478
1479 return safe;
1480 }
1481
1482 void PSParallelCompact::forward_to_new_addr() {
1483 GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1484 uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1485
1486 struct ForwardTask final : public WorkerTask {
1487 uint _num_workers;
1488
1489 explicit ForwardTask(uint num_workers) :
1490 WorkerTask("PSForward task"),
1491 _num_workers(num_workers) {}
1492
    // Decide whether obj's markWord must be saved before forwarding
    // overwrites it. end_addr bounds the words that may be safely read past
    // obj's start.
    static bool should_preserve_mark(oop obj, HeapWord* end_addr) {
      size_t remaining_words = pointer_delta(end_addr, cast_from_oop<HeapWord*>(obj));

      if (Arguments::is_valhalla_enabled() && !safe_to_read_header(remaining_words)) {
        // When using Valhalla, it might be necessary to preserve the Valhalla-
        // specific bits in the markWord. If the entire object header is
        // copied, the correct markWord (with the appropriate Valhalla bits)
        // can be safely read from the Klass. However, if the full header is
        // not copied, we cannot safely read the Klass to obtain this information.
        // In such cases, we always preserve the markWord to ensure that all
        // relevant bits, including Valhalla-specific ones, are retained.
        return true;
      } else {
        // Otherwise fall back to the mark word's own preservation criterion.
        return obj->mark().must_be_preserved();
      }
    }
1509
    // Forward (compute and install new addresses for) all marked objects in
    // [start, end), assigning them consecutive addresses starting at
    // destination. Marks are preserved via should_preserve_mark before the
    // forwarding pointer is installed.
    static void forward_objs_in_range(ParCompactionManager* cm,
                                      HeapWord* start,
                                      HeapWord* end,
                                      HeapWord* destination) {
      HeapWord* cur_addr = start;
      HeapWord* new_addr = destination;

      while (cur_addr < end) {
        // Advance to the next live (marked) object, if any remain.
        cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
        if (cur_addr >= end) {
          return;
        }
        assert(mark_bitmap()->is_marked(cur_addr), "inv");
        oop obj = cast_to_oop(cur_addr);

        if (new_addr != cur_addr) {
          // The object will move: save its mark first if required
          // (forwarding can clobber mark bits), then install the forwardee.
          if (should_preserve_mark(obj, end)) {
            cm->preserved_marks()->push_always(obj, obj->mark());
          }

          FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
        }
        size_t obj_size = obj->size();
        new_addr += obj_size;
        cur_addr += obj_size;
      }
    }
1537
1538 void work(uint worker_id) override {
1539 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1540 for (uint id = old_space_id; id < last_space_id; ++id) {
1541 MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1542 HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1543 HeapWord* top = sp->top();
1544
1545 if (dense_prefix_addr == top) {
1546 // Empty space
1547 continue;
1548 }
1549
2150
2151 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2152 ParallelCompactData& sd = summary_data();
2153 assert(sd.is_region_aligned(region_start_addr), "precondition");
2154
2155 // Use per-region partial_obj_size to locate the end of the obj, that extends
2156 // to region_start_addr.
2157 size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2158 size_t end_region_idx = sd.region_count();
2159 size_t accumulated_size = 0;
2160 for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2161 size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2162 accumulated_size += cur_partial_obj_size;
2163 if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2164 break;
2165 }
2166 }
2167 return region_start_addr + accumulated_size;
2168 }
2169
2170 static markWord safe_mark_word_prototype(HeapWord* cur_addr, HeapWord* end_addr) {
2171 // If the original markWord contains bits that cannot be reconstructed because
2172 // the header cannot be safely read, a placeholder is used. In this case,
2173 // the correct markWord is preserved before compaction and restored after
2174 // compaction completes.
2175 size_t remaining_words = pointer_delta(end_addr, cur_addr);
2176
2177 if (UseCompactObjectHeaders || (Arguments::is_valhalla_enabled() && safe_to_read_header(remaining_words))) {
2178 return cast_to_oop(cur_addr)->klass()->prototype_header();
2179 } else {
2180 return markWord::prototype();
2181 }
2182 }
2183
2184 // Use region_idx as the destination region, and evacuate all live objs on its
2185 // source regions to this destination region.
2186 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2187 {
2188 ParMarkBitMap* const bitmap = mark_bitmap();
2189 ParallelCompactData& sd = summary_data();
2190 RegionData* const region_ptr = sd.region(region_idx);
2191
2192 // Get the source region and related info.
2193 size_t src_region_idx = region_ptr->source_region();
2194 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2195 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2196 HeapWord* dest_addr = sd.region_to_addr(region_idx);
2197
2198 closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2199
2200 // Adjust src_region_idx to prepare for decrementing destination counts (the
2201 // destination count is not decremented when a region is copied to itself).
2202 if (src_region_idx == region_idx) {
2203 src_region_idx += 1;
2279 HeapWord* cur_addr = closure.source();
2280 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2281 src_space_top);
2282 // To handle the case where the final obj in source region extends to next region.
2283 HeapWord* final_obj_start = (end_addr == src_space_top)
2284 ? nullptr
2285 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2286 // Apply closure on objs inside [cur_addr, end_addr)
2287 do {
2288 cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2289 if (cur_addr == end_addr) {
2290 break;
2291 }
2292 size_t obj_size;
2293 if (final_obj_start == cur_addr) {
2294 obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2295 } else {
2296 // This obj doesn't extend into next region; size() is safe to use.
2297 obj_size = cast_to_oop(cur_addr)->size();
2298 }
2299
2300 markWord mark = safe_mark_word_prototype(cur_addr, end_addr);
2301
2302 // Perform the move and update of the object
2303 closure.do_addr(cur_addr, obj_size, mark);
2304
2305 cur_addr += obj_size;
2306 } while (cur_addr < end_addr && !closure.is_full());
2307
2308 if (closure.is_full()) {
2309 decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2310 closure.complete_region(dest_addr, region_ptr);
2311 return;
2312 }
2313
2314 decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2315
2316 // Move to the next source region, possibly switching spaces as well. All
2317 // args except end_addr may be modified.
2318 src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2319 } while (true);
2320 }
2321
2322 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2323 {
2324 MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2404 }
2405
2406 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2407 {
2408 size_t words = MIN2(partial_obj_size, words_remaining());
2409
2410 // This test is necessary; if omitted, the pointer updates to a partial object
2411 // that crosses the dense prefix boundary could be overwritten.
2412 if (source() != copy_destination()) {
2413 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2414 Copy::aligned_conjoint_words(source(), copy_destination(), words);
2415 }
2416 update_state(words);
2417 }
2418
// A normal (non-shadow) region is filled in place; simply mark it completed.
void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}
2423
// Move the object at addr (words in size) to this closure's destination,
// install `mark` as the markWord of the moved copy, update the start array,
// and advance the closure state. addr becomes the new source.
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words, markWord mark) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  // NOTE(review): this uses `words` before the clamp below — presumably so
  // the block record spans the whole object; confirm before changing.
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    // The copied header still carries the forwarding information (asserted
    // above on the source); replace it with the caller-provided mark.
    cast_to_oop(copy_destination())->set_mark(mark);
  }

  update_state(words);
}
2448
2449 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2450 assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2451 // Record the shadow region index
2452 region_ptr->set_shadow_region(_shadow);
2453 // Mark the shadow region as filled to indicate the data is ready to be
2454 // copied back
2455 region_ptr->mark_filled();
2456 // Try to copy the content of the shadow region back to its corresponding
2457 // heap region if available; the GC thread that decreases the destination
2458 // count to zero will do the copying otherwise (see
2459 // PSParallelCompact::decrement_destination_counts).
2460 if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2461 region_ptr->set_completed();
2462 PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2463 ParCompactionManager::push_shadow_region_mt_safe(_shadow);
|