1 /*
  2  * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/g1/g1CollectedHeap.hpp"
 27 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 28 #include "gc/g1/g1FullCollector.hpp"
 29 #include "gc/g1/g1FullCollector.inline.hpp"
 30 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 31 #include "gc/g1/g1FullGCCompactTask.hpp"
 32 #include "gc/g1/heapRegion.inline.hpp"
 33 #include "gc/shared/gcTraceTime.inline.hpp"
 34 #include "gc/shared/slidingForwarding.inline.hpp"
 35 #include "logging/log.hpp"
 36 #include "oops/oop.inline.hpp"
 37 #include "utilities/ticks.hpp"
 38 
 39 // Do work for all skip-compacting regions.
 40 class G1ResetSkipCompactingClosure : public HeapRegionClosure {
 41   G1FullCollector* _collector;
 42 
 43 public:
 44   G1ResetSkipCompactingClosure(G1FullCollector* collector) : _collector(collector) { }
 45 
 46   bool do_heap_region(HeapRegion* r) {
 47     uint region_index = r->hrm_index();
 48     // Only for skip-compaction regions; early return otherwise.
 49     if (!_collector->is_skip_compacting(region_index)) {
 50       return false;
 51     }
 52 #ifdef ASSERT
 53     if (r->is_humongous()) {
 54       oop obj = cast_to_oop(r->humongous_start_region()->bottom());
 55       assert(_collector->mark_bitmap()->is_marked(obj), "must be live");
 56     } else if (r->is_open_archive()) {
 57       bool is_empty = (_collector->live_words(r->hrm_index()) == 0);
 58       assert(!is_empty, "should contain at least one live obj");
 59     } else if (r->is_closed_archive()) {
 60       // should early-return above
 61       ShouldNotReachHere();
 62     } else {
 63       assert(_collector->live_words(region_index) > _collector->scope()->region_compaction_threshold(),
 64              "should be quite full");
 65     }
 66 #endif
 67     r->reset_skip_compacting_after_full_gc();
 68     return false;
 69   }
 70 };
 71 
// Clear the mark bit for a single (already visited) object. Called from
// apply() for every compacted object so the bitmap needs no separate
// region-wide clearing pass afterwards.
void G1FullGCCompactTask::G1CompactRegionClosure::clear_in_bitmap(oop obj) {
  assert(_bitmap->is_marked(obj), "Should only compact marked objects");
  _bitmap->clear(obj);
}
 76 
// Move one marked object to its forwarded destination (if it has one) and
// clear its mark bit. Returns the object's size in words so the caller can
// advance to the next marked object.
size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
  // Read the size before any copying; the copy below may overwrite words
  // at the destination that later iterations would otherwise read.
  size_t size = obj->size();
  if (obj->is_forwarded()) {
    // Destination was computed during the preparation phase and is looked
    // up through the sliding-forwarding table.
    HeapWord* destination = cast_from_oop<HeapWord*>(_forwarding->forwardee(obj));

    // copy object and reinit its mark
    HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
    assert(obj_addr != destination, "everything in this pass should be moving");
    Copy::aligned_conjoint_words(obj_addr, destination, size);

    // There is no need to transform stack chunks - marking already did that.
    cast_to_oop(destination)->init_mark();
    assert(cast_to_oop(destination)->klass() != NULL, "should have a class");
  }

  // Clear the mark for the compacted object to allow reuse of the
  // bitmap without an additional clearing step.
  clear_in_bitmap(obj);
  return size;
}
 97 
 98 void G1FullGCCompactTask::compact_region(HeapRegion* hr) {
 99   assert(!hr->is_pinned(), "Should be no pinned region in compaction queue");
100   assert(!hr->is_humongous(), "Should be no humongous regions in compaction queue");
101 
102   if (!collector()->is_free(hr->hrm_index())) {
103     // The compaction closure not only copies the object to the new
104     // location, but also clears the bitmap for it. This is needed
105     // for bitmap verification and to be able to use the bitmap
106     // for evacuation failures in the next young collection. Testing
107     // showed that it was better overall to clear bit by bit, compared
108     // to clearing the whole region at the end. This difference was
109     // clearly seen for regions with few marks.
110     G1CompactRegionClosure compact(collector()->mark_bitmap());
111     hr->apply_to_marked_objects(collector()->mark_bitmap(), &compact);
112   }
113 
114   hr->reset_compacted_after_full_gc();
115 }
116 
117 void G1FullGCCompactTask::work(uint worker_id) {
118   Ticks start = Ticks::now();
119   GrowableArray<HeapRegion*>* compaction_queue = collector()->compaction_point(worker_id)->regions();
120   for (GrowableArrayIterator<HeapRegion*> it = compaction_queue->begin();
121        it != compaction_queue->end();
122        ++it) {
123     compact_region(*it);
124   }
125 
126   G1ResetSkipCompactingClosure hc(collector());
127   G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&hc, &_claimer, worker_id);
128   log_task("Compaction task", worker_id, start);
129 }
130 
131 void G1FullGCCompactTask::serial_compaction() {
132   GCTraceTime(Debug, gc, phases) tm("Phase 4: Serial Compaction", collector()->scope()->timer());
133   GrowableArray<HeapRegion*>* compaction_queue = collector()->serial_compaction_point()->regions();
134   for (GrowableArrayIterator<HeapRegion*> it = compaction_queue->begin();
135        it != compaction_queue->end();
136        ++it) {
137     compact_region(*it);
138   }
139 }