
src/hotspot/share/gc/serial/genMarkSweep.cpp

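----- Old version (before the change) -----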

 31 #include "classfile/vmSymbols.hpp"
 32 #include "code/codeCache.hpp"
 33 #include "compiler/oopMap.hpp"
 34 #include "gc/serial/cardTableRS.hpp"
 35 #include "gc/serial/defNewGeneration.hpp"
 36 #include "gc/serial/generation.hpp"
 37 #include "gc/serial/genMarkSweep.hpp"
 38 #include "gc/serial/markSweep.inline.hpp"
 39 #include "gc/serial/serialGcRefProcProxyTask.hpp"
 40 #include "gc/serial/serialHeap.hpp"
 41 #include "gc/shared/classUnloadingContext.hpp"
 42 #include "gc/shared/collectedHeap.inline.hpp"
 43 #include "gc/shared/gcHeapSummary.hpp"
 44 #include "gc/shared/gcTimer.hpp"
 45 #include "gc/shared/gcTrace.hpp"
 46 #include "gc/shared/gcTraceTime.inline.hpp"
 47 #include "gc/shared/modRefBarrierSet.hpp"
 48 #include "gc/shared/preservedMarks.inline.hpp"
 49 #include "gc/shared/referencePolicy.hpp"
 50 #include "gc/shared/referenceProcessorPhaseTimes.hpp"

 51 #include "gc/shared/space.inline.hpp"
 52 #include "gc/shared/strongRootsScope.hpp"
 53 #include "gc/shared/weakProcessor.hpp"
 54 #include "memory/universe.hpp"
 55 #include "oops/instanceRefKlass.hpp"
 56 #include "oops/oop.inline.hpp"
 57 #include "prims/jvmtiExport.hpp"
 58 #include "runtime/handles.inline.hpp"
 59 #include "runtime/javaThread.hpp"
 60 #include "runtime/prefetch.inline.hpp"
 61 #include "runtime/synchronizer.hpp"
 62 #include "runtime/vmThread.hpp"
 63 #include "utilities/copy.hpp"
 64 #include "utilities/events.hpp"
 65 #include "utilities/stack.inline.hpp"
 66 #if INCLUDE_JVMCI
 67 #include "jvmci/jvmci.hpp"
 68 #endif
 69 
 70 class DeadSpacer : StackObj {

178   }
179 
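      // Prefetch helpers for the linear heap scans below. The
      // Prefetch{Scan,Copy}IntervalInBytes flags give the prefetch distance
      // in bytes; a negative value disables prefetching.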
180   static void prefetch_read_scan(void* p) {
181     if (PrefetchScanIntervalInBytes >= 0) {
182       Prefetch::read(p, PrefetchScanIntervalInBytes);
183     }
184   }
185 
186   static void prefetch_write_scan(void* p) {
187     if (PrefetchScanIntervalInBytes >= 0) {
188       Prefetch::write(p, PrefetchScanIntervalInBytes);
189     }
190   }
191 
192   static void prefetch_write_copy(void* p) {
193     if (PrefetchCopyIntervalInBytes >= 0) {
194       Prefetch::write(p, PrefetchCopyIntervalInBytes);
195     }
196   }
197 

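      // Forward obj to its new location, or, when it already sits at its
      // destination, reset the markword (marking encoded the GC mark there)
      // so the object needs no further work during the copy phase.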
198   static void forward_obj(oop obj, HeapWord* new_addr) {
199     prefetch_write_scan(obj);
200     if (cast_from_oop<HeapWord*>(obj) != new_addr) {
201       obj->forward_to(cast_to_oop(new_addr));
202     } else {
203       assert(obj->is_gc_marked(), "inv");
204       // This obj will stay in-place. Fix the markword.
205       obj->init_mark();
206     }
207   }
208 
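      // Linear scan from start for the next marked (live) object; dead
      // objects are stepped over via obj->size(), which is still valid here.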
209   static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
210     for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
211       prefetch_read_scan(i_addr);
212       oop obj = cast_to_oop(i_addr);
213       if (obj->is_gc_marked()) {
214         return i_addr;
215       }
216       i_addr += obj->size();
217     }
218     return end;
219   }
220 

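      // Copy one live object to its forwardee and give it a fresh markword.
      // aligned_conjoint_words tolerates overlapping ranges, which sliding
      // compaction needs because objects only move toward lower compaction
      // addresses.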
221   static size_t relocate(HeapWord* addr) {
222     // Prefetch source and destination
223     prefetch_read_scan(addr);
224 
225     oop obj = cast_to_oop(addr);
226     oop new_obj = obj->forwardee();
227     HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
228     assert(addr != new_addr, "inv");
229     prefetch_write_copy(new_addr);
230 
231     size_t obj_size = obj->size();
232     Copy::aligned_conjoint_words(addr, new_addr, obj_size);
233     new_obj->init_mark();
234 
235     return obj_size;
236   }
237 
238 public:
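      // Note on ordering: old-gen space first, then eden and from-space, so
      // live objects slide toward the old generation. to-space joins only
      // after a promotion failure, when it may also hold live objects.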
239   explicit Compacter(SerialHeap* heap) {
240     // In this order so that heap is compacted towards old-gen.
241     _spaces[0].init(heap->old_gen()->space());
242     _spaces[1].init(heap->young_gen()->eden());
243     _spaces[2].init(heap->young_gen()->from());
244 
245     bool is_promotion_failed = (heap->young_gen()->from()->next_compaction_space() != nullptr);
246     if (is_promotion_failed) {
247       _spaces[3].init(heap->young_gen()->to());
248       _num_spaces = 4;
249     } else {
250       _num_spaces = 3;
251     }
252     _index = 0;
253   }
254 

255   void phase2_calculate_new_addr() {
256     for (uint i = 0; i < _num_spaces; ++i) {
257       ContiguousSpace* space = get_space(i);
258       HeapWord* cur_addr = space->bottom();
259       HeapWord* top = space->top();
260 
261       bool record_first_dead_done = false;
262 
263       DeadSpacer dead_spacer(space);
264 
265       while (cur_addr < top) {
266         oop obj = cast_to_oop(cur_addr);
267         size_t obj_size = obj->size();
268         if (obj->is_gc_marked()) {
269           HeapWord* new_addr = alloc(obj_size);
270           forward_obj(obj, new_addr);
271           cur_addr += obj_size;
272         } else {
273           // Skipping the current known-unmarked obj
274           HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
275           if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
276             // Register space for the filler obj
277             alloc(pointer_delta(next_live_addr, cur_addr));
278           } else {
279             if (!record_first_dead_done) {
280               record_first_dead(i, cur_addr);
281               record_first_dead_done = true;
282             }
283             *(HeapWord**)cur_addr = next_live_addr;
284           }
285           cur_addr = next_live_addr;
286         }
287       }
288 
289       if (!record_first_dead_done) {
290         record_first_dead(i, top);
291       }
292     }
293   }
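      // Dead gaps that DeadSpacer declines to keep as filler are encoded in
      // place: the first word of the gap stores the address of the next live
      // object, and record_first_dead() notes where this encoding starts so
      // the later phases can skip dead ranges directly.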
294 
295   void phase3_adjust_pointers() {
296     for (uint i = 0; i < _num_spaces; ++i) {
297       ContiguousSpace* space = get_space(i);
298       HeapWord* cur_addr = space->bottom();
299       HeapWord* const top = space->top();
300       HeapWord* const first_dead = get_first_dead(i);
301 
302       while (cur_addr < top) {
303         prefetch_write_scan(cur_addr);
304         if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
305           size_t size = MarkSweep::adjust_pointers(cast_to_oop(cur_addr));
306           cur_addr += size;
307         } else {
308           assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
309           cur_addr = *(HeapWord**)cur_addr;
310         }
311       }
312     }
313   }
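      // Everything below first_dead is parsable (live objects plus retained
      // filler) and is walked object by object; past first_dead, the
      // next-live addresses stored by phase 2 jump over the dead gaps.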
314 
315   void phase4_compact() {
316     for (uint i = 0; i < _num_spaces; ++i) {
317       ContiguousSpace* space = get_space(i);
318       HeapWord* cur_addr = space->bottom();
319       HeapWord* top = space->top();
320 
321       // Check if the first obj inside this space is forwarded.
322       if (!cast_to_oop(cur_addr)->is_forwarded()) {
323         // Jump over consecutive (in-place) live-objs-chunk
324         cur_addr = get_first_dead(i);
325       }
326 
327       while (cur_addr < top) {
328         if (!cast_to_oop(cur_addr)->is_forwarded()) {
329           cur_addr = *(HeapWord**) cur_addr;
330           continue;
331         }
332         cur_addr += relocate(cur_addr);
333       }
334 
335       // Reset top and unused memory
336       space->set_top(get_compaction_top(i));
337       if (ZapUnusedHeapArea) {
338         space->mangle_unused_area();
339       }
340     }
341   }
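      // Objects are copied in ascending address order, so each destination
      // has already been vacated; in-place (unforwarded) objects are skipped
      // via the stored next-live addresses.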
342 };
343 
344 void GenMarkSweep::phase1_mark(bool clear_all_softrefs) {
345   // Recursively traverse all live objects and mark them
346   GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
347 
348   SerialHeap* gch = SerialHeap::heap();
349 
350   ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
351 
352   ref_processor()->start_discovery(clear_all_softrefs);
353 
354   {
355     StrongRootsScope srs(0);
356 
357     CLDClosure* weak_cld_closure = ClassUnloading ? nullptr : &follow_cld_closure;
358     MarkingCodeBlobClosure mark_code_closure(&follow_root_closure, !CodeBlobToOopClosure::FixRelocations, true);
359     gch->process_roots(SerialHeap::SO_None,
360                        &follow_root_closure,
361                        &follow_cld_closure,

431   SerialHeap* gch = SerialHeap::heap();
432 #ifdef ASSERT
433   if (gch->soft_ref_policy()->should_clear_all_soft_refs()) {
434     assert(clear_all_softrefs, "Policy should have been checked earlier");
435   }
436 #endif
437 
438   gch->trace_heap_before_gc(_gc_tracer);
439 
440   // Increment the invocation count
441   _total_invocations++;
442 
443   // Capture used regions for old-gen to reestablish old-to-young invariant
444   // after full-gc.
445   gch->old_gen()->save_used_region();
446 
447   allocate_stacks();
448 
449   phase1_mark(clear_all_softrefs);
450 


451   Compacter compacter{gch};
452 
453   {
454     // Now all live objects are marked, compute the new object addresses.
455     GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
456 
457     compacter.phase2_calculate_new_addr();
458   }
459 
460   // Don't add any more derived pointers during phase3
461 #if COMPILER2_OR_JVMCI
462   assert(DerivedPointerTable::is_active(), "Sanity");
463   DerivedPointerTable::set_active(false);
464 #endif
465 
466   {
467     // Adjust the pointers to reflect the new locations
468     GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
469 
470     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
471 
472     CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
473     gch->process_roots(SerialHeap::SO_AllCodeCache,
474                        &adjust_pointer_closure,
475                        &adjust_cld_closure,
476                        &adjust_cld_closure,
477                        &code_closure);
478 
479     WeakProcessor::oops_do(&adjust_pointer_closure);
480 
481     adjust_marks();
482     compacter.phase3_adjust_pointers();
483   }
484 
485   {
486     // All pointers are now adjusted, move objects accordingly
487     GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
488 
489     compacter.phase4_compact();
490   }
491 


492   restore_marks();
493 
494   // Set saved marks for allocation profiler (and other things? -- dld)
495   // (Should this be in general part?)
496   gch->save_marks();
497 
498   deallocate_stacks();
499 
500   MarkSweep::_string_dedup_requests->flush();
501 
502   bool is_young_gen_empty = (gch->young_gen()->used() == 0);
503   gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
504 
505   gch->prune_scavengable_nmethods();
506 
507   // Update heap occupancy information which is used as
508   // input to soft ref clearing policy at the next gc.
509   Universe::heap()->update_capacity_and_used_at_gc();
510 
511   // Signal that we have completed a visit to all live objects.

 31 #include "classfile/vmSymbols.hpp"
 32 #include "code/codeCache.hpp"
 33 #include "compiler/oopMap.hpp"
 34 #include "gc/serial/cardTableRS.hpp"
 35 #include "gc/serial/defNewGeneration.hpp"
 36 #include "gc/serial/generation.hpp"
 37 #include "gc/serial/genMarkSweep.hpp"
 38 #include "gc/serial/markSweep.inline.hpp"
 39 #include "gc/serial/serialGcRefProcProxyTask.hpp"
 40 #include "gc/serial/serialHeap.hpp"
 41 #include "gc/shared/classUnloadingContext.hpp"
 42 #include "gc/shared/collectedHeap.inline.hpp"
 43 #include "gc/shared/gcHeapSummary.hpp"
 44 #include "gc/shared/gcTimer.hpp"
 45 #include "gc/shared/gcTrace.hpp"
 46 #include "gc/shared/gcTraceTime.inline.hpp"
 47 #include "gc/shared/modRefBarrierSet.hpp"
 48 #include "gc/shared/preservedMarks.inline.hpp"
 49 #include "gc/shared/referencePolicy.hpp"
 50 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 51 #include "gc/shared/slidingForwarding.inline.hpp"
 52 #include "gc/shared/space.inline.hpp"
 53 #include "gc/shared/strongRootsScope.hpp"
 54 #include "gc/shared/weakProcessor.hpp"
 55 #include "memory/universe.hpp"
 56 #include "oops/instanceRefKlass.hpp"
 57 #include "oops/oop.inline.hpp"
 58 #include "prims/jvmtiExport.hpp"
 59 #include "runtime/handles.inline.hpp"
 60 #include "runtime/javaThread.hpp"
 61 #include "runtime/prefetch.inline.hpp"
 62 #include "runtime/synchronizer.hpp"
 63 #include "runtime/vmThread.hpp"
 64 #include "utilities/copy.hpp"
 65 #include "utilities/events.hpp"
 66 #include "utilities/stack.inline.hpp"
 67 #if INCLUDE_JVMCI
 68 #include "jvmci/jvmci.hpp"
 69 #endif
 70 
 71 class DeadSpacer : StackObj {

179   }
180 
181   static void prefetch_read_scan(void* p) {
182     if (PrefetchScanIntervalInBytes >= 0) {
183       Prefetch::read(p, PrefetchScanIntervalInBytes);
184     }
185   }
186 
187   static void prefetch_write_scan(void* p) {
188     if (PrefetchScanIntervalInBytes >= 0) {
189       Prefetch::write(p, PrefetchScanIntervalInBytes);
190     }
191   }
192 
193   static void prefetch_write_copy(void* p) {
194     if (PrefetchCopyIntervalInBytes >= 0) {
195       Prefetch::write(p, PrefetchCopyIntervalInBytes);
196     }
197   }
198 
199   template <bool ALT_FWD>
200   static void forward_obj(oop obj, HeapWord* new_addr) {
201     prefetch_write_scan(obj);
202     if (cast_from_oop<HeapWord*>(obj) != new_addr) {
203       SlidingForwarding::forward_to<ALT_FWD>(obj, cast_to_oop(new_addr));
204     } else {
205       assert(obj->is_gc_marked(), "inv");
206       // This obj will stay in-place. Fix the markword.
207       obj->init_mark();
208     }
209   }
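      // ALT_FWD selects, at compile time, SlidingForwarding's alternative
      // forwarding encoding; the non-template wrappers below choose the
      // specialization from the UseAltGCForwarding flag. Presumably the
      // alternative encoding serves configurations whose markword cannot
      // hold a full forwarding pointer.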
210 
211   static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
212     for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
213       prefetch_read_scan(i_addr);
214       oop obj = cast_to_oop(i_addr);
215       if (obj->is_gc_marked()) {
216         return i_addr;
217       }
218       i_addr += obj->size();
219     }
220     return end;
221   }
222 
223   template <bool ALT_FWD>
224   static size_t relocate(HeapWord* addr) {
225     // Prefetch source and destination
226     prefetch_read_scan(addr);
227 
228     oop obj = cast_to_oop(addr);
229     oop new_obj = SlidingForwarding::forwardee<ALT_FWD>(obj);
230     HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
231     assert(addr != new_addr, "inv");
232     prefetch_write_copy(new_addr);
233 
234     size_t obj_size = obj->size();
235     Copy::aligned_conjoint_words(addr, new_addr, obj_size);
236     new_obj->init_mark();
237 
238     return obj_size;
239   }
240 
241 public:
242   explicit Compacter(SerialHeap* heap) {
243     // In this order so that heap is compacted towards old-gen.
244     _spaces[0].init(heap->old_gen()->space());
245     _spaces[1].init(heap->young_gen()->eden());
246     _spaces[2].init(heap->young_gen()->from());
247 
248     bool is_promotion_failed = (heap->young_gen()->from()->next_compaction_space() != nullptr);
249     if (is_promotion_failed) {
250       _spaces[3].init(heap->young_gen()->to());
251       _num_spaces = 4;
252     } else {
253       _num_spaces = 3;
254     }
255     _index = 0;
256   }
257 
258   template <bool ALT_FWD>
259   void phase2_calculate_new_addr() {
260     for (uint i = 0; i < _num_spaces; ++i) {
261       ContiguousSpace* space = get_space(i);
262       HeapWord* cur_addr = space->bottom();
263       HeapWord* top = space->top();
264 
265       bool record_first_dead_done = false;
266 
267       DeadSpacer dead_spacer(space);
268 
269       while (cur_addr < top) {
270         oop obj = cast_to_oop(cur_addr);
271         size_t obj_size = obj->size();
272         if (obj->is_gc_marked()) {
273           HeapWord* new_addr = alloc(obj_size);
274           forward_obj<ALT_FWD>(obj, new_addr);
275           cur_addr += obj_size;
276         } else {
277           // Skipping the current known-unmarked obj
278           HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
279           if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
280             // Register space for the filler obj
281             alloc(pointer_delta(next_live_addr, cur_addr));
282           } else {
283             if (!record_first_dead_done) {
284               record_first_dead(i, cur_addr);
285               record_first_dead_done = true;
286             }
287             *(HeapWord**)cur_addr = next_live_addr;
288           }
289           cur_addr = next_live_addr;
290         }
291       }
292 
293       if (!record_first_dead_done) {
294         record_first_dead(i, top);
295       }
296     }
297   }
298 
299   void phase2_calculate_new_addr() {
300     if (UseAltGCForwarding) {
301       phase2_calculate_new_addr<true>();
302     } else {
303       phase2_calculate_new_addr<false>();
304     }
305   }
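      // Checking UseAltGCForwarding once per phase (here and in the wrappers
      // below) hoists the flag test out of the per-object loops; each
      // specialization compiles with ALT_FWD as a constant.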
306 
307   template <bool ALT_FWD>
308   void phase3_adjust_pointers() {
309     for (uint i = 0; i < _num_spaces; ++i) {
310       ContiguousSpace* space = get_space(i);
311       HeapWord* cur_addr = space->bottom();
312       HeapWord* const top = space->top();
313       HeapWord* const first_dead = get_first_dead(i);
314 
315       while (cur_addr < top) {
316         prefetch_write_scan(cur_addr);
317         if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
318           size_t size = MarkSweep::adjust_pointers<ALT_FWD>(cast_to_oop(cur_addr));
319           cur_addr += size;
320         } else {
321           assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
322           cur_addr = *(HeapWord**)cur_addr;
323         }
324       }
325     }
326   }
327 
328   void phase3_adjust_pointers() {
329     if (UseAltGCForwarding) {
330       phase3_adjust_pointers<true>();
331     } else {
332       phase3_adjust_pointers<false>();
333     }
334   }
335 
336   template <bool ALT_FWD>
337   void phase4_compact() {
338     for (uint i = 0; i < _num_spaces; ++i) {
339       ContiguousSpace* space = get_space(i);
340       HeapWord* cur_addr = space->bottom();
341       HeapWord* top = space->top();
342 
343       // Check if the first obj inside this space is forwarded.
344       if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) {
345         // Jump over consecutive (in-place) live-objs-chunk
346         cur_addr = get_first_dead(i);
347       }
348 
349       while (cur_addr < top) {
350         if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) {
351           cur_addr = *(HeapWord**) cur_addr;
352           continue;
353         }
354         cur_addr += relocate<ALT_FWD>(cur_addr);
355       }
356 
357       // Reset top and unused memory
358       space->set_top(get_compaction_top(i));
359       if (ZapUnusedHeapArea) {
360         space->mangle_unused_area();
361       }
362     }
363   }
364 
365   void phase4_compact() {
366     if (UseAltGCForwarding) {
367       phase4_compact<true>();
368     } else {
369       phase4_compact<false>();
370     }
371   }
372 };
373 
374 void GenMarkSweep::phase1_mark(bool clear_all_softrefs) {
375   // Recursively traverse all live objects and mark them
376   GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
377 
378   SerialHeap* gch = SerialHeap::heap();
379 
380   ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
381 
382   ref_processor()->start_discovery(clear_all_softrefs);
383 
384   {
385     StrongRootsScope srs(0);
386 
387     CLDClosure* weak_cld_closure = ClassUnloading ? nullptr : &follow_cld_closure;
388     MarkingCodeBlobClosure mark_code_closure(&follow_root_closure, !CodeBlobToOopClosure::FixRelocations, true);
389     gch->process_roots(SerialHeap::SO_None,
390                        &follow_root_closure,
391                        &follow_cld_closure,

461   SerialHeap* gch = SerialHeap::heap();
462 #ifdef ASSERT
463   if (gch->soft_ref_policy()->should_clear_all_soft_refs()) {
464     assert(clear_all_softrefs, "Policy should have been checked earlier");
465   }
466 #endif
467 
468   gch->trace_heap_before_gc(_gc_tracer);
469 
470   // Increment the invocation count
471   _total_invocations++;
472 
473   // Capture used regions for old-gen to reestablish old-to-young invariant
474   // after full-gc.
475   gch->old_gen()->save_used_region();
476 
477   allocate_stacks();
478 
479   phase1_mark(clear_all_softrefs);
480 
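      // begin()/end() bracket the window in which forwarding pointers are
      // installed and read; presumably they set up and tear down whatever
      // side tables the alternative encoding requires.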
481   SlidingForwarding::begin();
482 
483   Compacter compacter{gch};
484 
485   {
486     // Now all live objects are marked, compute the new object addresses.
487     GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
488 
489     compacter.phase2_calculate_new_addr();
490   }
491 
492   // Don't add any more derived pointers during phase3
493 #if COMPILER2_OR_JVMCI
494   assert(DerivedPointerTable::is_active(), "Sanity");
495   DerivedPointerTable::set_active(false);
496 #endif
497 
498   {
499     // Adjust the pointers to reflect the new locations
500     GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
501 
502     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
503 
504     if (UseAltGCForwarding) {
505       AdjustPointerClosure<true> adjust_pointer_closure;
506       CLDToOopClosure adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
507       CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
508       gch->process_roots(SerialHeap::SO_AllCodeCache,
509                          &adjust_pointer_closure,
510                          &adjust_cld_closure,
511                          &adjust_cld_closure,
512                          &code_closure);
513 
514       WeakProcessor::oops_do(&adjust_pointer_closure);
515     } else {
516       AdjustPointerClosure<false> adjust_pointer_closure;
517       CLDToOopClosure adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
518       CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
519       gch->process_roots(SerialHeap::SO_AllCodeCache,
520                          &adjust_pointer_closure,
521                          &adjust_cld_closure,
522                          &adjust_cld_closure,
523                          &code_closure);
524 
525       WeakProcessor::oops_do(&adjust_pointer_closure);
526     }
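      // The two branches are textually identical but cannot be merged:
      // AdjustPointerClosure<true> and AdjustPointerClosure<false> are
      // distinct types, and the CLD/code closures must bind to the concrete
      // closure object.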
527 
528     adjust_marks();
529     compacter.phase3_adjust_pointers();
530   }
531 
532   {
533     // All pointers are now adjusted, move objects accordingly
534     GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
535 
536     compacter.phase4_compact();
537   }
538 
539   SlidingForwarding::end();
540 
541   restore_marks();
542 
543   // Set saved marks for allocation profiler (and other things? -- dld)
544   // (Should this be in general part?)
545   gch->save_marks();
546 
547   deallocate_stacks();
548 
549   MarkSweep::_string_dedup_requests->flush();
550 
551   bool is_young_gen_empty = (gch->young_gen()->used() == 0);
552   gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
553 
554   gch->prune_scavengable_nmethods();
555 
556   // Update heap occupancy information which is used as
557   // input to soft ref clearing policy at the next gc.
558   Universe::heap()->update_capacity_and_used_at_gc();
559 
560   // Signal that we have completed a visit to all live objects.