/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManagerNew.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompactNew.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/methodData.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

SpaceInfoNew PSParallelCompactNew::_space_info[PSParallelCompactNew::last_space_id];

size_t PSParallelCompactNew::_num_regions;
PCRegionData* PSParallelCompactNew::_region_data_array;
size_t PSParallelCompactNew::_num_regions_serial;
PCRegionData* PSParallelCompactNew::_region_data_array_serial;
PCRegionData** PSParallelCompactNew::_per_worker_region_data;
bool PSParallelCompactNew::_serial = false;

SpanSubjectToDiscoveryClosure PSParallelCompactNew::_span_based_discoverer;
ReferenceProcessor* PSParallelCompactNew::_ref_processor = nullptr;

void PSParallelCompactNew::print_on(outputStream* st) {
  _mark_bitmap.print_on(st);
}

STWGCTimer          PSParallelCompactNew::_gc_timer;
ParallelOldTracer   PSParallelCompactNew::_gc_tracer;
elapsedTimer        PSParallelCompactNew::_accumulated_time;
unsigned int        PSParallelCompactNew::_maximum_compaction_gc_num = 0;
CollectorCounters*  PSParallelCompactNew::_counters = nullptr;
ParMarkBitMap       PSParallelCompactNew::_mark_bitmap;

PSParallelCompactNew::IsAliveClosure PSParallelCompactNew::_is_alive_closure;

class PCAdjustPointerClosure: public BasicOopIterateClosure {
  template <typename T>
  void do_oop_work(T* p) { PSParallelCompactNew::adjust_pointer(p); }

public:
  void do_oop(oop* p) final          { do_oop_work(p); }
  void do_oop(narrowOop* p) final    { do_oop_work(p); }

  ReferenceIterationMode reference_iteration_mode() final { return DO_FIELDS; }
};

static PCAdjustPointerClosure pc_adjust_pointer_closure;

class IsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) final;
};

bool PSParallelCompactNew::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompactNew::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _span_based_discoverer.set_span(heap->reserved_region());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // non-header is alive closure

  _counters = new CollectorCounters("Parallel full collection pauses", 1);

  // Initialize static fields in ParCompactionManagerNew.
  ParCompactionManagerNew::initialize(mark_bitmap());
}

bool PSParallelCompactNew::initialize_aux_data() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  assert(mr.byte_size() != 0, "heap should be reserved");

  initialize_space_info();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate %zuKB bitmaps for parallel "
      "garbage collection for the requested %zuKB heap.",
      _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  return true;
}

void PSParallelCompactNew::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  PSYoungGen* young_gen = ParallelScavengeHeap::heap()->young_gen();

  _space_info[old_space_id].set_space(ParallelScavengeHeap::heap()->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[old_space_id].set_start_array(ParallelScavengeHeap::heap()->old_gen()->start_array());
}

void
PSParallelCompactNew::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC.  The marking bitmap is cleared to top; nothing
  // should be marked above top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();

  _mark_bitmap.clear_range(bot, top);
}

void PSParallelCompactNew::pre_compact()
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of young
  // collections will have swapped the spaces an unknown number of times.
  GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  // Increment the invocation count
  heap->increment_total_collections(true);

  CodeCache::on_gc_marking_cycle_start();

  heap->print_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
}

void PSParallelCompactNew::post_compact()
{
  GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);

  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap covering this space.
    clear_data_covering_space(SpaceId(id));
  }

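  // Walk the per-region data to: (1) find the last region holding live data
  // in each space, (2) fill dead gaps behind the compacted data with dummy
  // objects so the spaces stay parsable, and (3) reset each space's top to
  // its new top.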
  {
    PCRegionData* last_live[last_space_id];
    for (uint i = old_space_id; i < last_space_id; ++i) {
      last_live[i] = nullptr;
    }

    // Figure out last region in each space that has live data.
    uint space_id = old_space_id;
    MutableSpace* space = _space_info[space_id].space();
    size_t num_regions = get_num_regions();
    PCRegionData* region_data_array = get_region_data_array();
    last_live[space_id] = &region_data_array[0];
    for (size_t idx = 0; idx < num_regions; idx++) {
      PCRegionData* rd = region_data_array + idx;
      if (!space->contains(rd->bottom())) {
        ++space_id;
        assert(space_id < last_space_id, "invariant");
        space = _space_info[space_id].space();
        log_develop_trace(gc, compaction)("Last live for space: %u: %zu", space_id, idx);
        last_live[space_id] = rd;
      }
      assert(space->contains(rd->bottom()), "next space should contain next region");
      log_develop_trace(gc, compaction)("post-compact region: idx: %zu, bottom: " PTR_FORMAT ", new_top: " PTR_FORMAT ", end: " PTR_FORMAT, rd->idx(), p2i(rd->bottom()), p2i(rd->new_top()), p2i(rd->end()));
      if (rd->new_top() > rd->bottom()) {
        last_live[space_id] = rd;
        log_develop_trace(gc, compaction)("Bump last live for space: %u", space_id);
      }
    }

    for (uint i = old_space_id; i < last_space_id; ++i) {
      PCRegionData* rd = last_live[i];
      log_develop_trace(gc, compaction)(
              "Last live region in space: %u, compaction region, " PTR_FORMAT ", #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT,
              i, p2i(rd), rd->idx(),
              p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()));
    }

    // Fill all gaps and update the space boundaries.
    space_id = old_space_id;
    space = _space_info[space_id].space();
    size_t total_live = 0;
    size_t total_waste = 0;
    for (size_t idx = 0; idx < num_regions; idx++) {
      PCRegionData* rd = &region_data_array[idx];
      PCRegionData* last_live_in_space = last_live[space_id];
      assert(last_live_in_space != nullptr, "last live must not be null");
      if (rd != last_live_in_space) {
        if (rd->new_top() < rd->end()) {
          ObjectStartArray* sa = start_array(SpaceId(space_id));
          if (sa != nullptr) {
            sa->update_for_block(rd->new_top(), rd->end());
          }
          ParallelScavengeHeap::heap()->fill_with_dummy_object(rd->new_top(), rd->end(), false);
        }
        size_t live = pointer_delta(rd->new_top(), rd->bottom());
        size_t waste = pointer_delta(rd->end(), rd->new_top());
        total_live += live;
        total_waste += waste;
        log_develop_trace(gc, compaction)(
                "Live compaction region, #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT ", live: %zu, waste: %zu",
                rd->idx(),
                p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()), live, waste);
      } else {
        // Update top of space.
        space->set_top(rd->new_top());
        size_t live = pointer_delta(rd->new_top(), rd->bottom());
        total_live += live;
        log_develop_trace(gc, compaction)(
                "Live compaction region, #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT ", live: %zu, waste: %zu",
                rd->idx(),
                p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()), live, size_t(0));

        // Fast-forward to the next space.
        for (; idx < num_regions - 1; idx++) {
          rd = &region_data_array[idx + 1];
          if (!space->contains(rd->bottom())) {
            space_id++;
            assert(space_id < last_space_id, "must be");
            space = _space_info[space_id].space();
            assert(space->contains(rd->bottom()), "space must contain region");
            break;
          }
        }
      }
    }
    log_debug(gc, compaction)("total live: %zu, total waste: %zu, ratio: %f", total_live, total_waste, ((float)total_waste)/((float)(total_live + total_waste)));
  }
  {
    FREE_C_HEAP_ARRAY(PCRegionData*, _per_worker_region_data);
    FREE_C_HEAP_ARRAY(PCRegionData, _region_data_array);
    FREE_C_HEAP_ARRAY(PCRegionData, _region_data_array_serial);
  }
#ifdef ASSERT
  {
    mark_bitmap()->verify_clear();
  }
#endif

  ParCompactionManagerNew::flush_all_string_dedup_requests();

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  PSCardTable* ct = heap->card_table();
  MemRegion old_mr = heap->old_gen()->committed();
  if (young_gen_empty) {
    ct->clear_MemRegion(old_mr);
  } else {
    ct->dirty_MemRegion(old_mr);
  }

  {
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
    DEBUG_ONLY(MetaspaceUtils::verify();)
  }

  // Need to clear claim bits for the next mark.
  ClassLoaderDataGraph::clear_claimed_marks();

  heap->prune_scavengable_nmethods();

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();
}

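// Carve each space into regions of calculate_region_size() words. A region
// boundary is pushed past any object that straddles it, so every live object
// is fully contained in exactly one region and regions can be forwarded,
// adjusted and compacted independently. A sketch (addresses are illustrative,
// not computed values):
//
//   nominal boundaries:  |<--- region 0 --->|<--- region 1 --->|
//   objects:             [ A ][ B ][    C     ...    ]
//   actual boundary:                                 ^ region 0 is extended to
//                                                      C's last word; region 1
//                                                      starts there.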
void PSParallelCompactNew::setup_regions_parallel() {
  const size_t region_size_words = calculate_region_size();

  size_t num_regions = 0;
  for (uint i = old_space_id; i < last_space_id; ++i) {
    MutableSpace* const space = _space_info[i].space();
    size_t const space_size_words = space->capacity_in_words();
    num_regions += align_up(space_size_words, region_size_words) / region_size_words;
  }
  _region_data_array = NEW_C_HEAP_ARRAY(PCRegionData, num_regions, mtGC);

  size_t region_idx = 0;
  for (uint i = old_space_id; i < last_space_id; ++i) {
    const MutableSpace* space = _space_info[i].space();
    HeapWord* addr = space->bottom();
    HeapWord* sp_end = space->end();
    HeapWord* sp_top = space->top();
    while (addr < sp_end) {
      HeapWord* end = MIN2(align_up(addr + region_size_words, region_size_words), space->end());
      if (addr < sp_top) {
        HeapWord* prev_obj_start = _mark_bitmap.find_obj_beg_reverse(addr, end);
        if (prev_obj_start < end) {
          HeapWord* prev_obj_end = prev_obj_start + cast_to_oop(prev_obj_start)->size();
          if (end < prev_obj_end) {
            // Object crosses region boundary, adjust end to be after object's last word.
            end = prev_obj_end;
          }
        }
      }
      assert(region_idx < num_regions, "must not exceed number of regions: region_idx: %zu, num_regions: %zu", region_idx, num_regions);
      HeapWord* top;
      if (sp_top < addr) {
        top = addr;
      } else if (sp_top >= end) {
        top = end;
      } else {
        top = sp_top;
      }
      assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr must be in heap: " PTR_FORMAT, p2i(addr));
      new (_region_data_array + region_idx) PCRegionData(region_idx, addr, top, end);
      addr = end;
      region_idx++;
    }
  }
  _num_regions = region_idx;
  log_info(gc)("Number of regions: %zu, region size: " EXACTFMT, _num_regions, EXACTFMTARGS(region_size_words * HeapWordSize));
}

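// Region size is chosen so that each worker gets roughly REGIONS_PER_WORKER
// regions, rounded up to a power of two and bounded below by SpaceAlignment.
// A worked example with assumed inputs (not defaults): 8 max workers and a
// 2 GB total capacity give 8 * 20 = 160 regions, 2 GB / 160 = 12.8 MB per
// region, rounded up to a 16 MB region size.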
size_t PSParallelCompactNew::calculate_region_size() {
  // Minimum 0.5MB region size
  static const size_t FLOOR_REGION_SIZE_WORDS = (SpaceAlignment / HeapWordSize);
  size_t total_heap_words = 0;
  for (uint i = old_space_id; i < last_space_id; ++i) {
    total_heap_words += _space_info[i].space()->capacity_in_words();
  }

  // Per-worker region count for dynamic region sizing
  static const uint REGIONS_PER_WORKER = 20;  // Based on 5% boundary waste threshold
  const uint max_workers = ParallelScavengeHeap::heap()->workers().max_workers();
  const size_t total_regions_count = (size_t)max_workers * REGIONS_PER_WORKER;
  const size_t dynamic_region_size_words = total_heap_words / total_regions_count;
  return round_up_power_of_2(MAX2(dynamic_region_size_words, FLOOR_REGION_SIZE_WORDS));
}

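// Serial mode uses exactly one region per space (old, eden, from, to), so the
// single forwarding/compaction thread fills each space densely from its
// bottom. This is the maximum-compaction fallback selected by
// check_maximum_compaction().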
void PSParallelCompactNew::setup_regions_serial() {
  _num_regions_serial = last_space_id;
  _region_data_array_serial = NEW_C_HEAP_ARRAY(PCRegionData, _num_regions_serial, mtGC);
  new (_region_data_array_serial + old_space_id)  PCRegionData(old_space_id, space(old_space_id)->bottom(), space(old_space_id)->top(), space(old_space_id)->end());
  new (_region_data_array_serial + eden_space_id) PCRegionData(eden_space_id, space(eden_space_id)->bottom(), space(eden_space_id)->top(), space(eden_space_id)->end());
  new (_region_data_array_serial + from_space_id) PCRegionData(from_space_id, space(from_space_id)->bottom(), space(from_space_id)->top(), space(from_space_id)->end());
  new (_region_data_array_serial + to_space_id)   PCRegionData(to_space_id, space(to_space_id)->bottom(), space(to_space_id)->top(), space(to_space_id)->end());
}

bool PSParallelCompactNew::check_maximum_compaction(bool should_do_max_compaction) {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Check System.GC
  bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
                          && GCCause::is_user_requested_gc(heap->gc_cause());

  return should_do_max_compaction
      || is_max_on_system_gc;
}

void PSParallelCompactNew::summary_phase() {
  GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);

  setup_regions_serial();
  setup_regions_parallel();

#ifndef PRODUCT
  for (size_t idx = 0; idx < _num_regions; idx++) {
    PCRegionData* rd = &_region_data_array[idx];
    log_develop_trace(gc, compaction)("Compaction region #%zu: [" PTR_FORMAT ", " PTR_FORMAT ")",
                                      rd->idx(), p2i(rd->bottom()), p2i(rd->end()));
  }
#endif
}

// This method should contain all heap-specific policy for invoking a full
// collection.  It only attempts to compact the heap; if we need to bail out
// for policy reasons, scavenge before full gc, or implement any other
// specialized behavior, it needs to be added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint.
//
// Note that the all_soft_refs_clear flag in the soft ref policy may be true
// because this method can be called without intervening activity.  For
// example, when heap space is tight and full measures are being taken to
// free space.
bool PSParallelCompactNew::invoke(bool clear_all_soft_refs, bool should_do_max_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  SvcGCMarker sgcm(SvcGCMarker::FULL);
  IsSTWGCActiveMark mark;

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  GCIdMark gc_id_mark;
  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  pre_compact();

  const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

  {
    const uint active_workers =
      WorkerPolicy::calc_active_workers(heap->workers().max_workers(),
                                        heap->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    heap->workers().set_active_workers(active_workers);

    if (check_maximum_compaction(should_do_max_compaction)) {
      // Serial compaction executes the forwarding and compaction phases
      // serially, thus achieving perfect compaction. Marking and reference
      // adjustment are still executed by parallel worker threads.
      _serial = true;
    } else {
      _serial = false;
    }

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);

    heap->pre_full_gc_dump(&_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->start_discovery(clear_all_soft_refs);

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_nmethod_free_separately */);

    marking_phase(&_gc_tracer);

    summary_phase();

#if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    FullGCForwarding::begin();

    forward_to_new_addr();

    adjust_pointers();

    compact();

    FullGCForwarding::end();

    ParCompactionManagerNew::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());

    // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
    // done before resizing.
    post_compact();

    // Let the size policy know we're done
    size_policy->major_collection_end();

    size_policy->sample_old_gen_used_bytes(MAX2(pre_gc_values.old_gen_used(), old_gen->used_in_bytes()));

    if (UseAdaptiveSizePolicy) {
      heap->resize_after_full_gc();
    }

    heap->resize_all_tlabs();

    // Resize the metaspace capacity after a collection
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(&_gc_timer);

    size_policy->record_gc_pause_end_instant();
  }

  heap->gc_epilogue(true);

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return true;
}

class PCAddThreadRootsMarkingTaskClosureNew : public ThreadClosure {
private:
  uint _worker_id;

public:
  explicit PCAddThreadRootsMarkingTaskClosureNew(uint worker_id) : _worker_id(worker_id) { }
  void do_thread(Thread* thread) final {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    ResourceMark rm;

    ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(_worker_id);

    MarkingNMethodClosure mark_and_push_in_blobs(&cm->_mark_and_push_closure);

    thread->oops_do(&cm->_mark_and_push_closure, &mark_and_push_in_blobs);

    // Do the real work
    cm->follow_marking_stacks();
  }
};

void steal_marking_work_new(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  ParCompactionManagerNew* cm =
    ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);

  do {
    ScannerTask task;
    if (ParCompactionManagerNew::steal(worker_id, task)) {
      cm->follow_contents(task, true);
    }
    cm->follow_marking_stacks();
  } while (!terminator.offer_termination());
}

class MarkFromRootsTaskNew : public WorkerTask {
  NMethodMarkingScope _nmethod_marking_scope;
  ThreadsClaimTokenScope _threads_claim_token_scope;
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
  TaskTerminator _terminator;
  uint _active_workers;

public:
  explicit MarkFromRootsTaskNew(uint active_workers) :
      WorkerTask("MarkFromRootsTaskNew"),
      _nmethod_marking_scope(),
      _threads_claim_token_scope(),
      _terminator(active_workers, ParCompactionManagerNew::marking_stacks()),
      _active_workers(active_workers) {}

  void work(uint worker_id) final {
    ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
    {
      CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
      ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);

      // Do the real work
      cm->follow_marking_stacks();
    }

    {
      PCAddThreadRootsMarkingTaskClosureNew closure(worker_id);
      Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
    }

    // Mark from OopStorages
    {
      _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
      // Do the real work
      cm->follow_marking_stacks();
    }

    if (_active_workers > 1) {
      steal_marking_work_new(_terminator, worker_id);
    }
  }
};

class ParallelCompactRefProcProxyTaskNew : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  explicit ParallelCompactRefProcProxyTaskNew(uint max_workers)
    : RefProcProxyTask("ParallelCompactRefProcProxyTaskNew", max_workers),
      _terminator(_max_workers, ParCompactionManagerNew::marking_stacks()) {}

  void work(uint worker_id) final {
    assert(worker_id < _max_workers, "sanity");
    ParCompactionManagerNew* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManagerNew::get_vmthread_cm() : ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    ParCompactionManagerNew::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
    _rp_task->rp_work(worker_id, PSParallelCompactNew::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() final {
    _terminator.reset_for_reuse(_queue_count);
  }
};

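// Marking runs in several steps: parallel marking from roots (with work
// stealing), reference processing, weak-root processing, class unloading,
// and the object-count report. All liveness information ends up in
// _mark_bitmap; no objects are moved yet.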
void PSParallelCompactNew::marking_phase(ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
  {
    GCTraceTime(Debug, gc, phases) pm_tm("Par Mark", &_gc_timer);

    MarkFromRootsTaskNew task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) rp_tm("Reference Processing", &_gc_timer);

    ReferenceProcessorStats stats;
    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());

    ParallelCompactRefProcProxyTaskNew task(ref_processor()->max_num_queues());
    stats = ref_processor()->process_discovered_references(task, &ParallelScavengeHeap::heap()->workers(), pt);

    gc_tracer->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  ParCompactionManagerNew::verify_all_marking_stack_empty();

  {
    GCTraceTime(Debug, gc, phases) wp_tm("Weak Processing", &_gc_timer);
    WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
                                is_alive_closure(),
                                &do_nothing_cl,
                                1);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(is_alive_closure());

      // Follow system dictionary roots and unload classes.
      unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethod's memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
      ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) roc_tm("Report Object Count", &_gc_timer);
    _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
  }
#if TASKQUEUE_STATS
  ParCompactionManagerNew::print_and_reset_taskqueue_stats();
#endif
}

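// Workers claim regions dynamically via PCRegionData::claim() instead of a
// fixed assignment, which load-balances regions of very different liveness.
// Within a claimed region the mark bitmap locates live objects, whose oop
// fields are then rewritten to the forwardee addresses.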
void PSParallelCompactNew::adjust_pointers_in_spaces(uint worker_id) {
  auto start_time = Ticks::now();
  for (size_t i = 0; i < _num_regions; i++) {
    PCRegionData* region = &_region_data_array[i];
    if (!region->claim()) {
      continue;
    }
    log_trace(gc, compaction)("Adjusting pointers in region: %zu (worker_id: %u)", region->idx(), worker_id);
    HeapWord* end = region->top();
    HeapWord* current = _mark_bitmap.find_obj_beg(region->bottom(), end);
    while (current < end) {
      assert(_mark_bitmap.is_marked(current), "must be marked");
      oop obj = cast_to_oop(current);
      size_t size = obj->size();
      obj->oop_iterate(&pc_adjust_pointer_closure);
      current = _mark_bitmap.find_obj_beg(current + size, end);
    }
  }
  log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
}

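// Adjusts all non-heap references in parallel: preserved marks, Java thread
// stacks, OopStorage sets, class-loader data and weak handles. The code-cache
// walk is not subdivided; exactly one worker claims it via SubTasksDone.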
class PSAdjustTaskNew final : public WorkerTask {
  SubTasksDone                               _sub_tasks;
  WeakProcessor::Task                        _weak_proc_task;
  OopStorageSetStrongParState<false, false>  _oop_storage_iter;
  uint                                       _nworkers;

  enum PSAdjustSubTask {
    PSAdjustSubTask_code_cache,

    PSAdjustSubTask_num_elements
  };

public:
  explicit PSAdjustTaskNew(uint nworkers) :
    WorkerTask("PSAdjust task"),
    _sub_tasks(PSAdjustSubTask_num_elements),
    _weak_proc_task(nworkers),
    _nworkers(nworkers) {

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
    if (nworkers > 1) {
      Threads::change_thread_claim_token();
    }
  }

  ~PSAdjustTaskNew() {
    Threads::assert_all_threads_claimed();
  }

  void work(uint worker_id) final {
    ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
    cm->preserved_marks()->adjust_during_full_gc();
    {
      // Adjust pointers in all spaces.
      PSParallelCompactNew::adjust_pointers_in_spaces(worker_id);
    }
    {
      ResourceMark rm;
      Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
    }
    _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
    {
      CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
      ClassLoaderDataGraph::cld_do(&cld_closure);
    }
    {
      AlwaysTrueClosure always_alive;
      _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
    }
    if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
      NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
      CodeCache::nmethods_do(&adjust_code);
    }
    _sub_tasks.all_tasks_claimed();
  }
};

void PSParallelCompactNew::adjust_pointers() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
  uint num_workers = ParallelScavengeHeap::heap()->workers().active_workers();
  PSAdjustTaskNew task(num_workers);
  ParallelScavengeHeap::heap()->workers().run_task(&task);
}

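// Regions are handed out round-robin: worker w forwards regions w,
// w + num_workers, w + 2 * num_workers, ... and chains them into a private
// list through local_next(). Objects are forwarded only into regions of that
// same list, so each worker compacts into memory it alone owns and the later
// compaction phase needs no synchronization. For example, with 3 workers and
// 7 regions:
//
//   worker 0: regions 0 -> 3 -> 6
//   worker 1: regions 1 -> 4
//   worker 2: regions 2 -> 5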
void PSParallelCompactNew::forward_to_new_addr() {
  GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
  uint num_workers = get_num_workers();
  _per_worker_region_data = NEW_C_HEAP_ARRAY(PCRegionData*, num_workers, mtGC);
  for (uint i = 0; i < num_workers; i++) {
    _per_worker_region_data[i] = nullptr;
  }

  class ForwardState {
    uint _worker_id;
    PCRegionData* _compaction_region;
    HeapWord* _compaction_point;

    void ensure_compaction_point() {
      if (_compaction_point == nullptr) {
        assert(_compaction_region == nullptr, "invariant");
        _compaction_region = _per_worker_region_data[_worker_id];
        assert(_compaction_region != nullptr, "invariant");
        _compaction_point = _compaction_region->bottom();
      }
    }
  public:
    explicit ForwardState(uint worker_id) :
            _worker_id(worker_id),
            _compaction_region(nullptr),
            _compaction_point(nullptr) {
    }

    size_t available() const {
      return pointer_delta(_compaction_region->end(), _compaction_point);
    }

    void forward_objs_in_region(ParCompactionManagerNew* cm, PCRegionData* region) {
      ensure_compaction_point();
      HeapWord* end = region->end();
      HeapWord* current = _mark_bitmap.find_obj_beg(region->bottom(), end);
      while (current < end) {
        assert(_mark_bitmap.is_marked(current), "must be marked");
        oop obj = cast_to_oop(current);
        assert(region->contains(obj), "object must not cross region boundary: obj: " PTR_FORMAT ", obj_end: " PTR_FORMAT ", region start: " PTR_FORMAT ", region end: " PTR_FORMAT, p2i(obj), p2i(cast_from_oop<HeapWord*>(obj) + obj->size()), p2i(region->bottom()), p2i(region->end()));
        size_t old_size = obj->size();
        size_t new_size = obj->copy_size(old_size, obj->mark());
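        // An object that stays at its current address keeps its old size; an
        // object that moves may need copy_size() words at the destination,
        // e.g. room for a preserved identity hash under compact object
        // headers.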
        size_t size = (current == _compaction_point) ? old_size : new_size;
        while (size > available()) {
          _compaction_region->set_new_top(_compaction_point);
          _compaction_region = _compaction_region->local_next();
          assert(_compaction_region != nullptr, "must find a compaction region");
          _compaction_point = _compaction_region->bottom();
          size = (current == _compaction_point) ? old_size : new_size;
        }
        //log_develop_trace(gc, compaction)("Forwarding obj: " PTR_FORMAT ", to: " PTR_FORMAT, p2i(obj), p2i(_compaction_point));
        if (current != _compaction_point) {
          cm->preserved_marks()->push_if_necessary(obj, obj->mark());
          FullGCForwarding::forward_to(obj, cast_to_oop(_compaction_point));
        }
        _compaction_point += size;
        assert(_compaction_point <= _compaction_region->end(), "object must fit in region");
        current += old_size;
        assert(current <= end, "object must not cross region boundary");
        current = _mark_bitmap.find_obj_beg(current, end);
      }
    }
    void finish() {
      if (_compaction_region != nullptr) {
        _compaction_region->set_new_top(_compaction_point);
      }
    }
  };

  struct ForwardTask final : public WorkerTask {
    ForwardTask() : WorkerTask("PSForward task") {}

    void work(uint worker_id) override {
      ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
      ForwardState state(worker_id);
      PCRegionData** last_link = &_per_worker_region_data[worker_id];
      size_t idx = worker_id;
      uint num_workers = get_num_workers();
      size_t num_regions = get_num_regions();
      PCRegionData* region_data_array = get_region_data_array();
      while (idx < num_regions) {
        PCRegionData* region = region_data_array + idx;
        *last_link = region;
        last_link = region->local_next_addr();
        state.forward_objs_in_region(cm, region);
        idx += num_workers;
      }
      state.finish();
    }
  } task;

  uint par_workers = ParallelScavengeHeap::heap()->workers().active_workers();
  ParallelScavengeHeap::heap()->workers().set_active_workers(num_workers);
  ParallelScavengeHeap::heap()->workers().run_task(&task);
  ParallelScavengeHeap::heap()->workers().set_active_workers(par_workers);

#ifndef PRODUCT
  for (uint wid = 0; wid < num_workers; wid++) {
    for (PCRegionData* rd = _per_worker_region_data[wid]; rd != nullptr; rd = rd->local_next()) {
      log_develop_trace(gc, compaction)("Per worker compaction region, worker: %u, #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT, wid, rd->idx(),
                                        p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()));
    }
  }
#endif
}

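// Copy each forwarded object to its destination, with every worker walking
// the private region list it built during forwarding. Destinations never
// overlap across workers because forwarding stayed within per-worker lists;
// an overlapping source/destination pair within a list is safe because the
// copy is conjoint (memmove-style).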
void PSParallelCompactNew::compact() {
  GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
  class CompactTask final : public WorkerTask {
    static void compact_region(PCRegionData* region) {
      HeapWord* bottom = region->bottom();
      HeapWord* end = region->top();
      if (bottom == end) {
        return;
      }
      HeapWord* current = _mark_bitmap.find_obj_beg(bottom, end);
      while (current < end) {
        oop obj = cast_to_oop(current);
        size_t size = obj->size();
        if (FullGCForwarding::is_forwarded(obj)) {
          oop fwd = FullGCForwarding::forwardee(obj);
          auto* dst = cast_from_oop<HeapWord*>(fwd);
          ObjectStartArray* sa = start_array(space_id(dst));
          if (sa != nullptr) {
            assert(dst != current, "expect moving object");
            size_t new_words = obj->copy_size(size, obj->mark());
            sa->update_for_block(dst, dst + new_words);
          }

          Copy::aligned_conjoint_words(current, dst, size);
          fwd->reinit_mark();
          fwd->initialize_hash_if_necessary(obj);
        } else {
          // The start_array must be updated even if the object is not moving.
          ObjectStartArray* sa = start_array(space_id(current));
          if (sa != nullptr) {
            sa->update_for_block(current, current + size);
          }
        }
        current = _mark_bitmap.find_obj_beg(current + size, end);
      }
    }
  public:
    explicit CompactTask() : WorkerTask("PSCompact task") {}
    void work(uint worker_id) override {
      PCRegionData* region = _per_worker_region_data[worker_id];
      while (region != nullptr) {
        log_trace(gc)("Compact worker: %u, compacting region: %zu", worker_id, region->idx());
        compact_region(region);
        region = region->local_next();
      }
    }
  } task;

  uint num_workers = get_num_workers();
  uint par_workers = ParallelScavengeHeap::heap()->workers().active_workers();
  ParallelScavengeHeap::heap()->workers().set_active_workers(num_workers);
  ParallelScavengeHeap::heap()->workers().run_task(&task);
  ParallelScavengeHeap::heap()->workers().set_active_workers(par_workers);
}

// Return the SpaceId for the space containing addr.  If addr is not in the
// heap, last_space_id is returned.  In debug mode it expects the address to be
// in the heap and asserts such.
PSParallelCompactNew::SpaceId PSParallelCompactNew::space_id(HeapWord* addr) {
  assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    if (_space_info[id].space()->contains(addr)) {
      return SpaceId(id);
    }
  }

  assert(false, "no space contains the addr");
  return last_space_id;
}