
src/hotspot/share/gc/parallel/psParallelCompact.cpp

  59 #include "gc/shared/oopStorageSetParState.inline.hpp"
  60 #include "gc/shared/parallelCleaning.hpp"
  61 #include "gc/shared/preservedMarks.inline.hpp"
  62 #include "gc/shared/referencePolicy.hpp"
  63 #include "gc/shared/referenceProcessor.hpp"
  64 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  65 #include "gc/shared/spaceDecorator.hpp"
  66 #include "gc/shared/taskTerminator.hpp"
  67 #include "gc/shared/weakProcessor.inline.hpp"
  68 #include "gc/shared/workerPolicy.hpp"
  69 #include "gc/shared/workerThread.hpp"
  70 #include "gc/shared/workerUtils.hpp"
  71 #include "logging/log.hpp"
  72 #include "memory/iterator.inline.hpp"
  73 #include "memory/memoryReserver.hpp"
  74 #include "memory/metaspaceUtils.hpp"
  75 #include "memory/resourceArea.hpp"
  76 #include "memory/universe.hpp"
  77 #include "nmt/memTracker.hpp"
  78 #include "oops/access.inline.hpp"

  79 #include "oops/instanceClassLoaderKlass.inline.hpp"
  80 #include "oops/instanceKlass.inline.hpp"
  81 #include "oops/instanceMirrorKlass.inline.hpp"
  82 #include "oops/methodData.hpp"
  83 #include "oops/objArrayKlass.inline.hpp"
  84 #include "oops/oop.inline.hpp"

  85 #include "runtime/atomicAccess.hpp"
  86 #include "runtime/handles.inline.hpp"
  87 #include "runtime/java.hpp"
  88 #include "runtime/safepoint.hpp"
  89 #include "runtime/threads.hpp"
  90 #include "runtime/vmThread.hpp"
  91 #include "services/memoryService.hpp"
  92 #include "utilities/align.hpp"
  93 #include "utilities/debug.hpp"
  94 #include "utilities/events.hpp"
  95 #include "utilities/formatBuffer.hpp"
  96 #include "utilities/macros.hpp"
  97 #include "utilities/stack.inline.hpp"
  98 #if INCLUDE_JVMCI
  99 #include "jvmci/jvmci.hpp"
 100 #endif
 101 
 102 #include <math.h>
 103 
 104 // All sizes are in HeapWords.

1431 
1432 // Split [start, end) evenly for a number of workers and return the
1433 // range for worker_id.
1434 static void split_regions_for_worker(size_t start, size_t end,
1435                                      uint worker_id, uint num_workers,
1436                                      size_t* worker_start, size_t* worker_end) {
1437   assert(start < end, "precondition");
1438   assert(num_workers > 0, "precondition");
1439   assert(worker_id < num_workers, "precondition");
1440 
1441   size_t num_regions = end - start;
1442   size_t num_regions_per_worker = num_regions / num_workers;
1443   size_t remainder = num_regions % num_workers;
1444   // The first few workers will get one extra.
1445   *worker_start = start + worker_id * num_regions_per_worker
1446                   + MIN2(checked_cast<size_t>(worker_id), remainder);
1447   *worker_end = *worker_start + num_regions_per_worker
1448                 + (worker_id < remainder ? 1 : 0);
1449 }
1450 
1451 void PSParallelCompact::forward_to_new_addr() {
1452   GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1453   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1454 
1455   struct ForwardTask final : public WorkerTask {
1456     uint _num_workers;
1457 
1458     explicit ForwardTask(uint num_workers) :
1459       WorkerTask("PSForward task"),
1460       _num_workers(num_workers) {}
1461 
1462     static void forward_objs_in_range(ParCompactionManager* cm,
1463                                       HeapWord* start,
1464                                       HeapWord* end,
1465                                       HeapWord* destination) {
1466       HeapWord* cur_addr = start;
1467       HeapWord* new_addr = destination;
1468 
1469       while (cur_addr < end) {
1470         cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
1471         if (cur_addr >= end) {
1472           return;
1473         }
1474         assert(mark_bitmap()->is_marked(cur_addr), "inv");
1475         oop obj = cast_to_oop(cur_addr);

1476         if (new_addr != cur_addr) {
1477           cm->preserved_marks()->push_if_necessary(obj, obj->mark());
1478           FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
1479         }
1480         size_t obj_size = obj->size();
1481         new_addr += obj_size;
1482         cur_addr += obj_size;
1483       }
1484     }
1485 
1486     void work(uint worker_id) override {
1487       ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1488       for (uint id = old_space_id; id < last_space_id; ++id) {
1489         MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1490         HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1491         HeapWord* top = sp->top();
1492 
1493         if (dense_prefix_addr == top) {
1494           // Empty space
1495           continue;
1496         }
1497 

2099 
2100 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2101   ParallelCompactData& sd = summary_data();
2102   assert(sd.is_region_aligned(region_start_addr), "precondition");
2103 
2104   // Use per-region partial_obj_size to locate the end of the obj that extends
2105   // to region_start_addr.
2106   size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2107   size_t end_region_idx = sd.region_count();
2108   size_t accumulated_size = 0;
2109   for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2110     size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2111     accumulated_size += cur_partial_obj_size;
2112     if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2113       break;
2114     }
2115   }
2116   return region_start_addr + accumulated_size;
2117 }
2118 
2119 // Use region_idx as the destination region, and evacuate all live objs from its
2120 // source regions to this destination region.
2121 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2122 {
2123   ParMarkBitMap* const bitmap = mark_bitmap();
2124   ParallelCompactData& sd = summary_data();
2125   RegionData* const region_ptr = sd.region(region_idx);
2126 
2127   // Get the source region and related info.
2128   size_t src_region_idx = region_ptr->source_region();
2129   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2130   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2131   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2132 
2133   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2134 
2135   // Adjust src_region_idx to prepare for decrementing destination counts (the
2136   // destination count is not decremented when a region is copied to itself).
2137   if (src_region_idx == region_idx) {
2138     src_region_idx += 1;

2214     HeapWord* cur_addr = closure.source();
2215     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2216                                     src_space_top);
2217     // To handle the case where the final obj in the source region extends into the next region.
2218     HeapWord* final_obj_start = (end_addr == src_space_top)
2219                                 ? nullptr
2220                                 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2221     // Apply closure on objs inside [cur_addr, end_addr)
2222     do {
2223       cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2224       if (cur_addr == end_addr) {
2225         break;
2226       }
2227       size_t obj_size;
2228       if (final_obj_start == cur_addr) {
2229         obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2230       } else {
2231         // This obj doesn't extend into next region; size() is safe to use.
2232         obj_size = cast_to_oop(cur_addr)->size();
2233       }
2234       closure.do_addr(cur_addr, obj_size);
2235       cur_addr += obj_size;
2236     } while (cur_addr < end_addr && !closure.is_full());
2237 
2238     if (closure.is_full()) {
2239       decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2240       closure.complete_region(dest_addr, region_ptr);
2241       return;
2242     }
2243 
2244     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2245 
2246     // Move to the next source region, possibly switching spaces as well.  All
2247     // args except end_addr may be modified.
2248     src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2249   } while (true);
2250 }
2251 
2252 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2253 {
2254   MoveAndUpdateClosure cl(mark_bitmap(), region_idx);

2334 }
2335 
2336 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2337 {
2338   size_t words = MIN2(partial_obj_size, words_remaining());
2339 
2340   // This test is necessary; if omitted, the pointer updates to a partial object
2341   // that crosses the dense prefix boundary could be overwritten.
2342   if (source() != copy_destination()) {
2343     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2344     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2345   }
2346   update_state(words);
2347 }
2348 
2349 void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2350   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
2351   region_ptr->set_completed();
2352 }
2353 
2354 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
2355   assert(destination() != nullptr, "sanity");
2356   _source = addr;
2357 
2358   // The start_array must be updated even if the object is not moving.
2359   if (_start_array != nullptr) {
2360     _start_array->update_for_block(destination(), destination() + words);
2361   }
2362 
2363   // Avoid overflow
2364   words = MIN2(words, words_remaining());
2365   assert(words > 0, "inv");
2366 
2367   if (copy_destination() != source()) {
2368     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2369     assert(source() != destination(), "inv");
2370     assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
2371     assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
2372     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2373     cast_to_oop(copy_destination())->init_mark();
2374   }
2375 
2376   update_state(words);
2377 }
2378 
2379 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2380   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2381   // Record the shadow region index
2382   region_ptr->set_shadow_region(_shadow);
2383   // Mark the shadow region as filled to indicate the data is ready to be
2384   // copied back
2385   region_ptr->mark_filled();
2386   // Try to copy the content of the shadow region back to its corresponding
2387   // heap region if available; the GC thread that decreases the destination
2388   // count to zero will do the copying otherwise (see
2389   // PSParallelCompact::decrement_destination_counts).
2390   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2391     region_ptr->set_completed();
2392     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2393     ParCompactionManager::push_shadow_region_mt_safe(_shadow);

  59 #include "gc/shared/oopStorageSetParState.inline.hpp"
  60 #include "gc/shared/parallelCleaning.hpp"
  61 #include "gc/shared/preservedMarks.inline.hpp"
  62 #include "gc/shared/referencePolicy.hpp"
  63 #include "gc/shared/referenceProcessor.hpp"
  64 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  65 #include "gc/shared/spaceDecorator.hpp"
  66 #include "gc/shared/taskTerminator.hpp"
  67 #include "gc/shared/weakProcessor.inline.hpp"
  68 #include "gc/shared/workerPolicy.hpp"
  69 #include "gc/shared/workerThread.hpp"
  70 #include "gc/shared/workerUtils.hpp"
  71 #include "logging/log.hpp"
  72 #include "memory/iterator.inline.hpp"
  73 #include "memory/memoryReserver.hpp"
  74 #include "memory/metaspaceUtils.hpp"
  75 #include "memory/resourceArea.hpp"
  76 #include "memory/universe.hpp"
  77 #include "nmt/memTracker.hpp"
  78 #include "oops/access.inline.hpp"
  79 #include "oops/flatArrayKlass.inline.hpp"
  80 #include "oops/instanceClassLoaderKlass.inline.hpp"
  81 #include "oops/instanceKlass.inline.hpp"
  82 #include "oops/instanceMirrorKlass.inline.hpp"
  83 #include "oops/methodData.hpp"
  84 #include "oops/objArrayKlass.inline.hpp"
  85 #include "oops/oop.inline.hpp"
  86 #include "runtime/arguments.hpp"
  87 #include "runtime/atomicAccess.hpp"
  88 #include "runtime/handles.inline.hpp"
  89 #include "runtime/java.hpp"
  90 #include "runtime/safepoint.hpp"
  91 #include "runtime/threads.hpp"
  92 #include "runtime/vmThread.hpp"
  93 #include "services/memoryService.hpp"
  94 #include "utilities/align.hpp"
  95 #include "utilities/debug.hpp"
  96 #include "utilities/events.hpp"
  97 #include "utilities/formatBuffer.hpp"
  98 #include "utilities/macros.hpp"
  99 #include "utilities/stack.inline.hpp"
 100 #if INCLUDE_JVMCI
 101 #include "jvmci/jvmci.hpp"
 102 #endif
 103 
 104 #include <math.h>
 105 
 106 // All sizes are in HeapWords.

1433 
1434 // Split [start, end) evenly for a number of workers and return the
1435 // range for worker_id.
1436 static void split_regions_for_worker(size_t start, size_t end,
1437                                      uint worker_id, uint num_workers,
1438                                      size_t* worker_start, size_t* worker_end) {
1439   assert(start < end, "precondition");
1440   assert(num_workers > 0, "precondition");
1441   assert(worker_id < num_workers, "precondition");
1442 
1443   size_t num_regions = end - start;
1444   size_t num_regions_per_worker = num_regions / num_workers;
1445   size_t remainder = num_regions % num_workers;
1446   // The first few workers will get one extra.
1447   *worker_start = start + worker_id * num_regions_per_worker
1448                   + MIN2(checked_cast<size_t>(worker_id), remainder);
1449   *worker_end = *worker_start + num_regions_per_worker
1450                 + (worker_id < remainder ? 1 : 0);
1451 }
1452 
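For orientation, a small standalone sketch of the same split arithmetic (plain C++ outside HotSpot; the helper name and types are illustrative, not part of the patch). With 10 regions and 3 workers, the per-worker base is 3 with a remainder of 1, so the ranges come out as [0, 4), [4, 7) and [7, 10):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Same arithmetic as split_regions_for_worker(), with plain types.
static void split(size_t start, size_t end, unsigned worker_id, unsigned num_workers,
                  size_t* worker_start, size_t* worker_end) {
  size_t num_regions = end - start;
  size_t per_worker  = num_regions / num_workers;
  size_t remainder   = num_regions % num_workers;
  *worker_start = start + worker_id * per_worker + std::min((size_t)worker_id, remainder);
  *worker_end   = *worker_start + per_worker + (worker_id < remainder ? 1 : 0);
}

int main() {
  for (unsigned w = 0; w < 3; ++w) {
    size_t ws, we;
    split(0, 10, w, 3, &ws, &we);
    printf("worker %u: [%zu, %zu)\n", w, ws, we);   // prints [0, 4), [4, 7), [7, 10)
  }
  return 0;
}
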
1453 static bool safe_to_read_header(size_t words) {
1454   precond(words > 0);
1455 
1456   // Safe to read if we have enough words for the full header, i.e., both
1457   // markWord and Klass pointer.
1458   const bool safe = words >= (size_t)oopDesc::header_size();
1459 
1460   // If using Compact Object Headers, the full header is inside the markWord,
1461   // so it will always be safe to read.
1462   assert(!UseCompactObjectHeaders || safe, "Compact Object Headers should always be safe");
1463 
1464   return safe;
1465 }
1466 
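A minimal sketch of the boundary this check draws, assuming purely for illustration that the full header occupies 2 HeapWords (a stand-in for oopDesc::header_size(); the real value depends on the header layout in use):

#include <cassert>
#include <cstddef>

const size_t kHeaderWords = 2;   // illustrative stand-in for oopDesc::header_size()

// Mirrors the check above: the available words must cover the full header,
// i.e. both the markWord and the Klass pointer, for the header to be readable.
static bool safe_to_read_header_sketch(size_t words) {
  return words >= kHeaderWords;
}

int main() {
  assert(!safe_to_read_header_sketch(1));   // only the markWord word is available
  assert(safe_to_read_header_sketch(2));    // markWord + Klass pointer are available
  return 0;
}
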
1467 void PSParallelCompact::forward_to_new_addr() {
1468   GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1469   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1470 
1471   struct ForwardTask final : public WorkerTask {
1472     uint _num_workers;
1473 
1474     explicit ForwardTask(uint num_workers) :
1475       WorkerTask("PSForward task"),
1476       _num_workers(num_workers) {}
1477 
1478     static bool should_preserve_mark(oop obj, HeapWord* end_addr) {
1479       size_t remaining_words = pointer_delta(end_addr, cast_from_oop<HeapWord*>(obj));
1480 
1481       if (Arguments::is_valhalla_enabled() && !safe_to_read_header(remaining_words)) {
1482         // When using Valhalla, it might be necessary to preserve the Valhalla-
1483         // specific bits in the markWord. If the entire object header is
1484         // copied, the correct markWord (with the appropriate Valhalla bits)
1485         // can be safely read from the Klass. However, if the full header is
1486         // not copied, we cannot safely read the Klass to obtain this information.
1487         // In such cases, we always preserve the markWord to ensure that all
1488         // relevant bits, including Valhalla-specific ones, are retained.
1489         return true;
1490       } else {
1491         return obj->mark().must_be_preserved();
1492       }
1493     }
1494 
1495     static void forward_objs_in_range(ParCompactionManager* cm,
1496                                       HeapWord* start,
1497                                       HeapWord* end,
1498                                       HeapWord* destination) {
1499       HeapWord* cur_addr = start;
1500       HeapWord* new_addr = destination;
1501 
1502       while (cur_addr < end) {
1503         cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
1504         if (cur_addr >= end) {
1505           return;
1506         }
1507         assert(mark_bitmap()->is_marked(cur_addr), "inv");
1508         oop obj = cast_to_oop(cur_addr);
1509 
1510         if (new_addr != cur_addr) {
1511           if (should_preserve_mark(obj, end)) {
1512             cm->preserved_marks()->push_always(obj, obj->mark());
1513           }
1514 
1515           FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
1516         }
1517         size_t obj_size = obj->size();
1518         new_addr += obj_size;
1519         cur_addr += obj_size;
1520       }
1521     }
1522 
1523     void work(uint worker_id) override {
1524       ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1525       for (uint id = old_space_id; id < last_space_id; ++id) {
1526         MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1527         HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1528         HeapWord* top = sp->top();
1529 
1530         if (dense_prefix_addr == top) {
1531           // Empty space
1532           continue;
1533         }
1534 

2136 
2137 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2138   ParallelCompactData& sd = summary_data();
2139   assert(sd.is_region_aligned(region_start_addr), "precondition");
2140 
2141   // Use per-region partial_obj_size to locate the end of the obj that extends
2142   // to region_start_addr.
2143   size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2144   size_t end_region_idx = sd.region_count();
2145   size_t accumulated_size = 0;
2146   for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2147     size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2148     accumulated_size += cur_partial_obj_size;
2149     if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2150       break;
2151     }
2152   }
2153   return region_start_addr + accumulated_size;
2154 }
2155 
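A quick worked example of the accumulation above (numbers are made up; RegionSize is assumed to be 512 HeapWords for this sketch): if the regions starting at region_start_addr report partial_obj_size values of 512, 512 and then 100 words, the loop stops after the 100-word entry, so the partial object ends 1124 words past region_start_addr.

#include <cassert>
#include <cstddef>

int main() {
  const size_t kRegionSize = 512;                     // assumed RegionSize in HeapWords
  const size_t partial_obj_size[] = {512, 512, 100};  // per-region values, illustrative
  size_t accumulated = 0;
  for (size_t s : partial_obj_size) {
    accumulated += s;
    if (s != kRegionSize) {
      break;                                          // the object ends inside this region
    }
  }
  assert(accumulated == 1124);                        // end = region_start_addr + 1124 words
  return 0;
}
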
2156 static markWord safe_mark_word_prototype(HeapWord* cur_addr, HeapWord* end_addr) {
2157   // If the original markWord contains bits that cannot be reconstructed because
2158   // the header cannot be safely read, a placeholder is used. In this case,
2159   // the correct markWord is preserved before compaction and restored after
2160   // compaction completes.
2161   size_t remaining_words = pointer_delta(end_addr, cur_addr);
2162 
2163   if (UseCompactObjectHeaders || (Arguments::is_valhalla_enabled() && safe_to_read_header(remaining_words))) {
2164     return cast_to_oop(cur_addr)->klass()->prototype_header();
2165   } else {
2166     return markWord::prototype();
2167   }
2168 }
2169 
2170 // Use region_idx as the destination region, and evacuate all live objs from its
2171 // source regions to this destination region.
2172 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2173 {
2174   ParMarkBitMap* const bitmap = mark_bitmap();
2175   ParallelCompactData& sd = summary_data();
2176   RegionData* const region_ptr = sd.region(region_idx);
2177 
2178   // Get the source region and related info.
2179   size_t src_region_idx = region_ptr->source_region();
2180   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2181   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2182   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2183 
2184   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2185 
2186   // Adjust src_region_idx to prepare for decrementing destination counts (the
2187   // destination count is not decremented when a region is copied to itself).
2188   if (src_region_idx == region_idx) {
2189     src_region_idx += 1;

2265     HeapWord* cur_addr = closure.source();
2266     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2267                                     src_space_top);
2268     // To handle the case where the final obj in the source region extends into the next region.
2269     HeapWord* final_obj_start = (end_addr == src_space_top)
2270                                 ? nullptr
2271                                 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2272     // Apply closure on objs inside [cur_addr, end_addr)
2273     do {
2274       cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2275       if (cur_addr == end_addr) {
2276         break;
2277       }
2278       size_t obj_size;
2279       if (final_obj_start == cur_addr) {
2280         obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2281       } else {
2282         // This obj doesn't extend into next region; size() is safe to use.
2283         obj_size = cast_to_oop(cur_addr)->size();
2284       }
2285 
2286       markWord mark = safe_mark_word_prototype(cur_addr, end_addr);
2287 
2288       // Perform the move and update of the object
2289       closure.do_addr(cur_addr, obj_size, mark);
2290 
2291       cur_addr += obj_size;
2292     } while (cur_addr < end_addr && !closure.is_full());
2293 
2294     if (closure.is_full()) {
2295       decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2296       closure.complete_region(dest_addr, region_ptr);
2297       return;
2298     }
2299 
2300     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2301 
2302     // Move to the next source region, possibly switching spaces as well.  All
2303     // args except end_addr may be modified.
2304     src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2305   } while (true);
2306 }
2307 
2308 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2309 {
2310   MoveAndUpdateClosure cl(mark_bitmap(), region_idx);

2390 }
2391 
2392 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2393 {
2394   size_t words = MIN2(partial_obj_size, words_remaining());
2395 
2396   // This test is necessary; if omitted, the pointer updates to a partial object
2397   // that crosses the dense prefix boundary could be overwritten.
2398   if (source() != copy_destination()) {
2399     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2400     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2401   }
2402   update_state(words);
2403 }
2404 
2405 void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2406   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
2407   region_ptr->set_completed();
2408 }
2409 
2410 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words, markWord mark) {
2411   assert(destination() != nullptr, "sanity");
2412   _source = addr;
2413 
2414   // The start_array must be updated even if the object is not moving.
2415   if (_start_array != nullptr) {
2416     _start_array->update_for_block(destination(), destination() + words);
2417   }
2418 
2419   // Avoid overflow
2420   words = MIN2(words, words_remaining());
2421   assert(words > 0, "inv");
2422 
2423   if (copy_destination() != source()) {
2424     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2425     assert(source() != destination(), "inv");
2426     assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
2427     assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
2428     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2429     cast_to_oop(copy_destination())->set_mark(mark);
2430   }
2431 
2432   update_state(words);
2433 }
2434 
2435 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2436   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2437   // Record the shadow region index
2438   region_ptr->set_shadow_region(_shadow);
2439   // Mark the shadow region as filled to indicate the data is ready to be
2440   // copied back
2441   region_ptr->mark_filled();
2442   // Try to copy the content of the shadow region back to its corresponding
2443   // heap region if available; the GC thread that decreases the destination
2444   // count to zero will do the copying otherwise (see
2445   // PSParallelCompact::decrement_destination_counts).
2446   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2447     region_ptr->set_completed();
2448     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2449     ParCompactionManager::push_shadow_region_mt_safe(_shadow);