< prev index next >

src/hotspot/share/gc/parallel/psParallelCompact.cpp

Print this page

  61 #include "gc/shared/oopStorageSetParState.inline.hpp"
  62 #include "gc/shared/parallelCleaning.hpp"
  63 #include "gc/shared/preservedMarks.inline.hpp"
  64 #include "gc/shared/referencePolicy.hpp"
  65 #include "gc/shared/referenceProcessor.hpp"
  66 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  67 #include "gc/shared/spaceDecorator.hpp"
  68 #include "gc/shared/taskTerminator.hpp"
  69 #include "gc/shared/weakProcessor.inline.hpp"
  70 #include "gc/shared/workerPolicy.hpp"
  71 #include "gc/shared/workerThread.hpp"
  72 #include "gc/shared/workerUtils.hpp"
  73 #include "logging/log.hpp"
  74 #include "memory/iterator.inline.hpp"
  75 #include "memory/memoryReserver.hpp"
  76 #include "memory/metaspaceUtils.hpp"
  77 #include "memory/resourceArea.hpp"
  78 #include "memory/universe.hpp"
  79 #include "nmt/memTracker.hpp"
  80 #include "oops/access.inline.hpp"

  81 #include "oops/instanceClassLoaderKlass.inline.hpp"
  82 #include "oops/instanceKlass.inline.hpp"
  83 #include "oops/instanceMirrorKlass.inline.hpp"
  84 #include "oops/methodData.hpp"
  85 #include "oops/objArrayKlass.inline.hpp"
  86 #include "oops/oop.inline.hpp"

  87 #include "runtime/handles.inline.hpp"
  88 #include "runtime/java.hpp"
  89 #include "runtime/safepoint.hpp"
  90 #include "runtime/threads.hpp"
  91 #include "runtime/vmThread.hpp"
  92 #include "services/memoryService.hpp"
  93 #include "utilities/align.hpp"
  94 #include "utilities/debug.hpp"
  95 #include "utilities/events.hpp"
  96 #include "utilities/formatBuffer.hpp"
  97 #include "utilities/macros.hpp"
  98 #include "utilities/stack.inline.hpp"
  99 #if INCLUDE_JVMCI
 100 #include "jvmci/jvmci.hpp"
 101 #endif
 102 
 103 #include <math.h>
 104 
 105 // All sizes are in HeapWords.
 106 const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words

1431 
1432 // Split [start, end) evenly for a number of workers and return the
1433 // range for worker_id.
1434 static void split_regions_for_worker(size_t start, size_t end,
1435                                      uint worker_id, uint num_workers,
1436                                      size_t* worker_start, size_t* worker_end) {
1437   assert(start < end, "precondition");
1438   assert(num_workers > 0, "precondition");
1439   assert(worker_id < num_workers, "precondition");
1440 
1441   size_t num_regions = end - start;
1442   size_t num_regions_per_worker = num_regions / num_workers;
1443   size_t remainder = num_regions % num_workers;
1444   // The first few workers will get one extra.
1445   *worker_start = start + worker_id * num_regions_per_worker
1446                   + MIN2(checked_cast<size_t>(worker_id), remainder);
1447   *worker_end = *worker_start + num_regions_per_worker
1448                 + (worker_id < remainder ? 1 : 0);
1449 }
1450 














1451 void PSParallelCompact::forward_to_new_addr() {
1452   GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1453   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1454 
1455   struct ForwardTask final : public WorkerTask {
1456     uint _num_workers;
1457 
1458     explicit ForwardTask(uint num_workers) :
1459       WorkerTask("PSForward task"),
1460       _num_workers(num_workers) {}
1461 

















    // Walk the marked (live) objects in [start, end) and install a forwarding
    // pointer in each, assigning consecutive new addresses starting at
    // 'destination'. Objects are not moved here; the later compaction phase
    // copies them using the forwardee recorded in the header.
    static void forward_objs_in_range(ParCompactionManager* cm,
                                      HeapWord* start,
                                      HeapWord* end,
                                      HeapWord* destination) {
      HeapWord* cur_addr = start;
      HeapWord* new_addr = destination;

      while (cur_addr < end) {
        // Advance to the next live object, if any remains in the range.
        cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
        if (cur_addr >= end) {
          return;
        }
        assert(mark_bitmap()->is_marked(cur_addr), "inv");
        oop obj = cast_to_oop(cur_addr);

        if (new_addr != cur_addr) {
          // Object will move: save the mark word if it cannot be
          // reconstructed after being overwritten by the forwarding pointer,
          // then record the new location.
          cm->preserved_marks()->push_if_necessary(obj, obj->mark());
          FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
        }
        size_t obj_size = obj->size();
        new_addr += obj_size;
        cur_addr += obj_size;
      }
    }
1485 
1486     void work(uint worker_id) override {
1487       ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1488       for (uint id = old_space_id; id < last_space_id; ++id) {
1489         MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1490         HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1491         HeapWord* top = sp->top();
1492 
1493         if (dense_prefix_addr == top) {
1494           // Empty space
1495           continue;
1496         }
1497 

2098 
2099 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2100   ParallelCompactData& sd = summary_data();
2101   assert(sd.is_region_aligned(region_start_addr), "precondition");
2102 
2103   // Use per-region partial_obj_size to locate the end of the obj, that extends
2104   // to region_start_addr.
2105   size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2106   size_t end_region_idx = sd.region_count();
2107   size_t accumulated_size = 0;
2108   for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2109     size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2110     accumulated_size += cur_partial_obj_size;
2111     if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2112       break;
2113     }
2114   }
2115   return region_start_addr + accumulated_size;
2116 }
2117 














2118 // Use region_idx as the destination region, and evacuate all live objs on its
2119 // source regions to this destination region.
2120 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2121 {
2122   ParMarkBitMap* const bitmap = mark_bitmap();
2123   ParallelCompactData& sd = summary_data();
2124   RegionData* const region_ptr = sd.region(region_idx);
2125 
2126   // Get the source region and related info.
2127   size_t src_region_idx = region_ptr->source_region();
2128   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2129   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2130   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2131 
2132   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2133 
2134   // Adjust src_region_idx to prepare for decrementing destination counts (the
2135   // destination count is not decremented when a region is copied to itself).
2136   if (src_region_idx == region_idx) {
2137     src_region_idx += 1;

2213     HeapWord* cur_addr = closure.source();
2214     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2215                                     src_space_top);
2216     // To handle the case where the final obj in source region extends to next region.
2217     HeapWord* final_obj_start = (end_addr == src_space_top)
2218                                 ? nullptr
2219                                 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2220     // Apply closure on objs inside [cur_addr, end_addr)
2221     do {
2222       cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2223       if (cur_addr == end_addr) {
2224         break;
2225       }
2226       size_t obj_size;
2227       if (final_obj_start == cur_addr) {
2228         obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2229       } else {
2230         // This obj doesn't extend into next region; size() is safe to use.
2231         obj_size = cast_to_oop(cur_addr)->size();
2232       }
2233       closure.do_addr(cur_addr, obj_size);





2234       cur_addr += obj_size;
2235     } while (cur_addr < end_addr && !closure.is_full());
2236 
2237     if (closure.is_full()) {
2238       decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2239       closure.complete_region(dest_addr, region_ptr);
2240       return;
2241     }
2242 
2243     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2244 
2245     // Move to the next source region, possibly switching spaces as well.  All
2246     // args except end_addr may be modified.
2247     src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2248   } while (true);
2249 }
2250 
2251 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2252 {
2253   MoveAndUpdateClosure cl(mark_bitmap(), region_idx);

2333 }
2334 
2335 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2336 {
2337   size_t words = MIN2(partial_obj_size, words_remaining());
2338 
2339   // This test is necessary; if omitted, the pointer updates to a partial object
2340   // that crosses the dense prefix boundary could be overwritten.
2341   if (source() != copy_destination()) {
2342     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2343     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2344   }
2345   update_state(words);
2346 }
2347 
// A normal (non-shadow) region is filled in place; completing it only
// requires flipping its state.
void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}
2352 
// Copy the live object at addr to the current destination and advance the
// closure's state by the number of words copied. 'words' may exceed the
// space left in the destination region; it is clamped below and the caller
// resumes with the remainder in the next region.
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    // The forwarding pointer installed during the forward phase must agree
    // with the address we are copying to.
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    // Re-initialize the mark word, which was clobbered by the forwarding
    // pointer in the source copy.
    cast_to_oop(copy_destination())->init_mark();
  }

  update_state(words);
}
2377 
2378 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2379   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2380   // Record the shadow region index
2381   region_ptr->set_shadow_region(_shadow);
2382   // Mark the shadow region as filled to indicate the data is ready to be
2383   // copied back
2384   region_ptr->mark_filled();
2385   // Try to copy the content of the shadow region back to its corresponding
2386   // heap region if available; the GC thread that decreases the destination
2387   // count to zero will do the copying otherwise (see
2388   // PSParallelCompact::decrement_destination_counts).
2389   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2390     region_ptr->set_completed();
2391     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2392     ParCompactionManager::push_shadow_region_mt_safe(_shadow);

  61 #include "gc/shared/oopStorageSetParState.inline.hpp"
  62 #include "gc/shared/parallelCleaning.hpp"
  63 #include "gc/shared/preservedMarks.inline.hpp"
  64 #include "gc/shared/referencePolicy.hpp"
  65 #include "gc/shared/referenceProcessor.hpp"
  66 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  67 #include "gc/shared/spaceDecorator.hpp"
  68 #include "gc/shared/taskTerminator.hpp"
  69 #include "gc/shared/weakProcessor.inline.hpp"
  70 #include "gc/shared/workerPolicy.hpp"
  71 #include "gc/shared/workerThread.hpp"
  72 #include "gc/shared/workerUtils.hpp"
  73 #include "logging/log.hpp"
  74 #include "memory/iterator.inline.hpp"
  75 #include "memory/memoryReserver.hpp"
  76 #include "memory/metaspaceUtils.hpp"
  77 #include "memory/resourceArea.hpp"
  78 #include "memory/universe.hpp"
  79 #include "nmt/memTracker.hpp"
  80 #include "oops/access.inline.hpp"
  81 #include "oops/flatArrayKlass.inline.hpp"
  82 #include "oops/instanceClassLoaderKlass.inline.hpp"
  83 #include "oops/instanceKlass.inline.hpp"
  84 #include "oops/instanceMirrorKlass.inline.hpp"
  85 #include "oops/methodData.hpp"
  86 #include "oops/objArrayKlass.inline.hpp"
  87 #include "oops/oop.inline.hpp"
  88 #include "runtime/arguments.hpp"
  89 #include "runtime/handles.inline.hpp"
  90 #include "runtime/java.hpp"
  91 #include "runtime/safepoint.hpp"
  92 #include "runtime/threads.hpp"
  93 #include "runtime/vmThread.hpp"
  94 #include "services/memoryService.hpp"
  95 #include "utilities/align.hpp"
  96 #include "utilities/debug.hpp"
  97 #include "utilities/events.hpp"
  98 #include "utilities/formatBuffer.hpp"
  99 #include "utilities/macros.hpp"
 100 #include "utilities/stack.inline.hpp"
 101 #if INCLUDE_JVMCI
 102 #include "jvmci/jvmci.hpp"
 103 #endif
 104 
 105 #include <math.h>
 106 
 107 // All sizes are in HeapWords.
 108 const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words

1433 
1434 // Split [start, end) evenly for a number of workers and return the
1435 // range for worker_id.
1436 static void split_regions_for_worker(size_t start, size_t end,
1437                                      uint worker_id, uint num_workers,
1438                                      size_t* worker_start, size_t* worker_end) {
1439   assert(start < end, "precondition");
1440   assert(num_workers > 0, "precondition");
1441   assert(worker_id < num_workers, "precondition");
1442 
1443   size_t num_regions = end - start;
1444   size_t num_regions_per_worker = num_regions / num_workers;
1445   size_t remainder = num_regions % num_workers;
1446   // The first few workers will get one extra.
1447   *worker_start = start + worker_id * num_regions_per_worker
1448                   + MIN2(checked_cast<size_t>(worker_id), remainder);
1449   *worker_end = *worker_start + num_regions_per_worker
1450                 + (worker_id < remainder ? 1 : 0);
1451 }
1452 
1453 static bool safe_to_read_header(size_t words) {
1454   precond(words > 0);
1455 
1456   // Safe to read if we have enough words for the full header, i.e., both
1457   // markWord and Klass pointer.
1458   const bool safe = words >= (size_t)oopDesc::header_size();
1459 
1460   // If using Compact Object Headers, the full header is inside the markWord,
1461   // so will always be safe to read
1462   assert(!UseCompactObjectHeaders || safe, "Compact Object Headers should always be safe");
1463 
1464   return safe;
1465 }
1466 
1467 void PSParallelCompact::forward_to_new_addr() {
1468   GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1469   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1470 
1471   struct ForwardTask final : public WorkerTask {
1472     uint _num_workers;
1473 
1474     explicit ForwardTask(uint num_workers) :
1475       WorkerTask("PSForward task"),
1476       _num_workers(num_workers) {}
1477 
1478     static bool should_preserve_mark(oop obj, HeapWord* end_addr) {
1479       size_t remaining_words = pointer_delta(end_addr, cast_from_oop<HeapWord*>(obj));
1480 
1481       if (Arguments::is_valhalla_enabled() && !safe_to_read_header(remaining_words)) {
1482         // When using Valhalla, it might be necessary to preserve the Valhalla-
1483         // specific bits in the markWord. If the entire object header is
1484         // copied, the correct markWord (with the appropriate Valhalla bits)
1485         // can be safely read from the Klass. However, if the full header is
1486         // not copied, we cannot safely read the Klass to obtain this information.
1487         // In such cases, we always preserve the markWord to ensure that all
1488         // relevant bits, including Valhalla-specific ones, are retained.
1489         return true;
1490       } else {
1491         return obj->mark().must_be_preserved();
1492       }
1493     }
1494 
    // Walk the marked (live) objects in [start, end) and install a forwarding
    // pointer in each, assigning consecutive new addresses starting at
    // 'destination'. Objects are not moved here; the later compaction phase
    // copies them using the forwardee recorded in the header.
    static void forward_objs_in_range(ParCompactionManager* cm,
                                      HeapWord* start,
                                      HeapWord* end,
                                      HeapWord* destination) {
      HeapWord* cur_addr = start;
      HeapWord* new_addr = destination;

      while (cur_addr < end) {
        // Advance to the next live object, if any remains in the range.
        cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
        if (cur_addr >= end) {
          return;
        }
        assert(mark_bitmap()->is_marked(cur_addr), "inv");
        oop obj = cast_to_oop(cur_addr);

        if (new_addr != cur_addr) {
          // Object will move: save the mark word when it cannot be
          // reconstructed later (see should_preserve_mark), then record the
          // new location in the header.
          if (should_preserve_mark(obj, end)) {
            cm->preserved_marks()->push_always(obj, obj->mark());
          }

          FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
        }
        size_t obj_size = obj->size();
        new_addr += obj_size;
        cur_addr += obj_size;
      }
    }
1522 
1523     void work(uint worker_id) override {
1524       ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1525       for (uint id = old_space_id; id < last_space_id; ++id) {
1526         MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1527         HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1528         HeapWord* top = sp->top();
1529 
1530         if (dense_prefix_addr == top) {
1531           // Empty space
1532           continue;
1533         }
1534 

2135 
2136 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2137   ParallelCompactData& sd = summary_data();
2138   assert(sd.is_region_aligned(region_start_addr), "precondition");
2139 
2140   // Use per-region partial_obj_size to locate the end of the obj, that extends
2141   // to region_start_addr.
2142   size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2143   size_t end_region_idx = sd.region_count();
2144   size_t accumulated_size = 0;
2145   for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2146     size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2147     accumulated_size += cur_partial_obj_size;
2148     if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2149       break;
2150     }
2151   }
2152   return region_start_addr + accumulated_size;
2153 }
2154 
2155 static markWord safe_mark_word_prototype(HeapWord* cur_addr, HeapWord* end_addr) {
2156   // If the original markWord contains bits that cannot be reconstructed because
2157   // the header cannot be safely read, a placeholder is used. In this case,
2158   // the correct markWord is preserved before compaction and restored after
2159   // compaction completes.
2160   size_t remaining_words = pointer_delta(end_addr, cur_addr);
2161 
2162   if (UseCompactObjectHeaders || (Arguments::is_valhalla_enabled() && safe_to_read_header(remaining_words))) {
2163     return cast_to_oop(cur_addr)->klass()->prototype_header();
2164   } else {
2165     return markWord::prototype();
2166   }
2167 }
2168 
2169 // Use region_idx as the destination region, and evacuate all live objs on its
2170 // source regions to this destination region.
2171 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2172 {
2173   ParMarkBitMap* const bitmap = mark_bitmap();
2174   ParallelCompactData& sd = summary_data();
2175   RegionData* const region_ptr = sd.region(region_idx);
2176 
2177   // Get the source region and related info.
2178   size_t src_region_idx = region_ptr->source_region();
2179   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2180   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2181   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2182 
2183   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2184 
2185   // Adjust src_region_idx to prepare for decrementing destination counts (the
2186   // destination count is not decremented when a region is copied to itself).
2187   if (src_region_idx == region_idx) {
2188     src_region_idx += 1;

2264     HeapWord* cur_addr = closure.source();
2265     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2266                                     src_space_top);
2267     // To handle the case where the final obj in source region extends to next region.
2268     HeapWord* final_obj_start = (end_addr == src_space_top)
2269                                 ? nullptr
2270                                 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2271     // Apply closure on objs inside [cur_addr, end_addr)
2272     do {
2273       cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2274       if (cur_addr == end_addr) {
2275         break;
2276       }
2277       size_t obj_size;
2278       if (final_obj_start == cur_addr) {
2279         obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2280       } else {
2281         // This obj doesn't extend into next region; size() is safe to use.
2282         obj_size = cast_to_oop(cur_addr)->size();
2283       }
2284 
2285       markWord mark = safe_mark_word_prototype(cur_addr, end_addr);
2286 
2287       // Perform the move and update of the object
2288       closure.do_addr(cur_addr, obj_size, mark);
2289 
2290       cur_addr += obj_size;
2291     } while (cur_addr < end_addr && !closure.is_full());
2292 
2293     if (closure.is_full()) {
2294       decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2295       closure.complete_region(dest_addr, region_ptr);
2296       return;
2297     }
2298 
2299     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2300 
2301     // Move to the next source region, possibly switching spaces as well.  All
2302     // args except end_addr may be modified.
2303     src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2304   } while (true);
2305 }
2306 
2307 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2308 {
2309   MoveAndUpdateClosure cl(mark_bitmap(), region_idx);

2389 }
2390 
2391 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2392 {
2393   size_t words = MIN2(partial_obj_size, words_remaining());
2394 
2395   // This test is necessary; if omitted, the pointer updates to a partial object
2396   // that crosses the dense prefix boundary could be overwritten.
2397   if (source() != copy_destination()) {
2398     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2399     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2400   }
2401   update_state(words);
2402 }
2403 
// A normal (non-shadow) region is filled in place; completing it only
// requires flipping its state.
void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}
2408 
// Copy the live object at addr to the current destination, install 'mark' as
// its new markWord (see safe_mark_word_prototype), and advance the closure's
// state by the number of words copied. 'words' may exceed the space left in
// the destination region; it is clamped below and the caller resumes with
// the remainder in the next region.
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words, markWord mark) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    // The forwarding pointer installed during the forward phase must agree
    // with the address we are copying to.
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    // Replace the forwarding-clobbered markWord with the caller-supplied one.
    cast_to_oop(copy_destination())->set_mark(mark);
  }

  update_state(words);
}
2433 
2434 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2435   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2436   // Record the shadow region index
2437   region_ptr->set_shadow_region(_shadow);
2438   // Mark the shadow region as filled to indicate the data is ready to be
2439   // copied back
2440   region_ptr->mark_filled();
2441   // Try to copy the content of the shadow region back to its corresponding
2442   // heap region if available; the GC thread that decreases the destination
2443   // count to zero will do the copying otherwise (see
2444   // PSParallelCompact::decrement_destination_counts).
2445   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2446     region_ptr->set_completed();
2447     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2448     ParCompactionManager::push_shadow_region_mt_safe(_shadow);
< prev index next >