1 /*
  2  * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_G1_G1FULLGCPREPARETASK_INLINE_HPP
 26 #define SHARE_GC_G1_G1FULLGCPREPARETASK_INLINE_HPP
 27 
 28 #include "gc/g1/g1FullGCPrepareTask.hpp"
 29 
 30 #include "gc/g1/g1CollectedHeap.inline.hpp"
 31 #include "gc/g1/g1FullCollector.hpp"
 32 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 33 #include "gc/g1/g1FullGCScope.hpp"
 34 #include "gc/g1/g1HeapRegion.inline.hpp"
 35 
 36 void G1DetermineCompactionQueueClosure::free_empty_humongous_region(HeapRegion* hr) {
 37   _g1h->free_humongous_region(hr, nullptr);
 38   _collector->set_free(hr->hrm_index());
 39   add_to_compaction_queue(hr);
 40 }
 41 
 42 inline bool G1DetermineCompactionQueueClosure::should_compact(HeapRegion* hr) const {
 43   // There is no need to iterate and forward objects in non-movable regions ie.
 44   // prepare them for compaction.
 45   if (hr->is_humongous() || hr->has_pinned_objects()) {
 46     return false;
 47   }
 48   size_t live_words = _collector->live_words(hr->hrm_index());
 49   size_t live_words_threshold = _collector->scope()->region_compaction_threshold();
 50   // High live ratio region will not be compacted.
 51   return live_words <= live_words_threshold;
 52 }
 53 
 54 inline uint G1DetermineCompactionQueueClosure::next_worker() {
 55   uint result = _cur_worker;
 56   _cur_worker = (_cur_worker + 1) % _collector->workers();
 57   return result;
 58 }
 59 
 60 inline G1FullGCCompactionPoint* G1DetermineCompactionQueueClosure::next_compaction_point() {
 61   return _collector->compaction_point(next_worker());
 62 }
 63 
 64 inline void G1DetermineCompactionQueueClosure::add_to_compaction_queue(HeapRegion* hr) {
 65   _collector->set_compaction_top(hr, hr->bottom());
 66   _collector->set_has_compaction_targets();
 67 
 68   G1FullGCCompactionPoint* cp = next_compaction_point();
 69   if (!cp->is_initialized()) {
 70     cp->initialize(hr);
 71   }
 72   // Add region to the compaction queue.
 73   cp->add(hr);
 74 }
 75 
 76 static bool has_pinned_objects(HeapRegion* hr) {
 77   return hr->has_pinned_objects() ||
 78       (hr->is_humongous() && hr->humongous_start_region()->has_pinned_objects());
 79 }
 80 
// Classifies one heap region for the full-GC prepare phase: either queue it
// for compaction, or record why it is skipped (pinned, humongous, or too
// dense). Always returns false so iteration continues over all regions.
inline bool G1DetermineCompactionQueueClosure::do_heap_region(HeapRegion* hr) {
  if (should_compact(hr)) {
    assert(!hr->is_humongous(), "moving humongous objects not supported.");
    add_to_compaction_queue(hr);
    return false;
  }

  // Region is not compacted; decide how it is skipped.
  assert(hr->containing_set() == nullptr, "already cleared by PrepareRegionsClosure");
  if (has_pinned_objects(hr)) {
    // First check regions with pinned objects: they need to be skipped regardless
    // of region type and never be considered for reclamation.
    assert(_collector->is_skip_compacting(hr->hrm_index()), "pinned region %u must be skip_compacting", hr->hrm_index());
    log_trace(gc, phases)("Phase 2: skip compaction region index: %u (%s), has pinned objects",
                          hr->hrm_index(), hr->get_short_type_str());
  } else if (hr->is_humongous()) {
    // A humongous object is live iff the object starting at the bottom of its
    // start region is marked; an unmarked one means the region can be freed.
    oop obj = cast_to_oop(hr->humongous_start_region()->bottom());
    bool is_empty = !_collector->mark_bitmap()->is_marked(obj);
    if (is_empty) {
      free_empty_humongous_region(hr);
    } else {
      // Live humongous object: remember that such regions exist.
      _collector->set_has_humongous();
    }
  } else {
    // Remaining case: a movable region that should_compact() rejected for its
    // high live ratio, which can only happen when MarkSweepDeadRatio is set.
    assert(MarkSweepDeadRatio > 0,
           "only skip compaction for other regions when MarkSweepDeadRatio > 0");

    // Too many live objects in the region; skip compacting it.
    _collector->update_from_compacting_to_skip_compacting(hr->hrm_index());
    log_trace(gc, phases)("Phase 2: skip compaction region index: %u, live words: " SIZE_FORMAT,
                            hr->hrm_index(), _collector->live_words(hr->hrm_index()));
  }

  // Never abort the heap-region iteration.
  return false;
}
115 
116 inline size_t G1SerialRePrepareClosure::apply(oop obj) {
117   if (obj->is_forwarded()) {
118     // We skip objects compiled into the first region or
119     // into regions not part of the serial compaction point.
120     if (cast_from_oop<HeapWord*>(obj->forwardee()) < _dense_prefix_top) {
121       return obj->size();
122     }
123   }
124 
125   // Get size and forward.
126   size_t size = obj->size();
127   _cp->forward(obj, size);
128 
129   return size;
130 }
131 
132 #endif // SHARE_GC_G1_G1FULLGCPREPARETASK_INLINE_HPP