1 /*
  2  * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_G1_G1FULLGCPREPARETASK_INLINE_HPP
 26 #define SHARE_GC_G1_G1FULLGCPREPARETASK_INLINE_HPP
 27 
 28 #include "gc/g1/g1FullGCPrepareTask.hpp"
 29 
 30 #include "gc/g1/g1CollectedHeap.inline.hpp"
 31 #include "gc/g1/g1FullCollector.hpp"
 32 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 33 #include "gc/g1/g1FullGCScope.hpp"
 34 #include "gc/g1/g1HeapRegion.inline.hpp"
 35 #include "gc/shared/slidingForwarding.inline.hpp"
 36 
 37 void G1DetermineCompactionQueueClosure::free_empty_humongous_region(HeapRegion* hr) {
 38   _g1h->free_humongous_region(hr, nullptr);
 39   _collector->set_free(hr->hrm_index());
 40   add_to_compaction_queue(hr);
 41 }
 42 
 43 inline bool G1DetermineCompactionQueueClosure::should_compact(HeapRegion* hr) const {
 44   // There is no need to iterate and forward objects in non-movable regions ie.
 45   // prepare them for compaction.
 46   if (hr->is_humongous() || hr->has_pinned_objects()) {
 47     return false;
 48   }
 49   size_t live_words = _collector->live_words(hr->hrm_index());
 50   size_t live_words_threshold = _collector->scope()->region_compaction_threshold();
 51   // High live ratio region will not be compacted.
 52   return live_words <= live_words_threshold;
 53 }
 54 
 55 inline uint G1DetermineCompactionQueueClosure::next_worker() {
 56   uint result = _cur_worker;
 57   _cur_worker = (_cur_worker + 1) % _collector->workers();
 58   return result;
 59 }
 60 
 61 inline G1FullGCCompactionPoint* G1DetermineCompactionQueueClosure::next_compaction_point() {
 62   return _collector->compaction_point(next_worker());
 63 }
 64 
 65 inline void G1DetermineCompactionQueueClosure::add_to_compaction_queue(HeapRegion* hr) {
 66   _collector->set_compaction_top(hr, hr->bottom());
 67   _collector->set_has_compaction_targets();
 68 
 69   G1FullGCCompactionPoint* cp = next_compaction_point();
 70   if (!cp->is_initialized()) {
 71     cp->initialize(hr);
 72   }
 73   // Add region to the compaction queue.
 74   cp->add(hr);
 75 }
 76 
 77 static bool has_pinned_objects(HeapRegion* hr) {
 78   return hr->has_pinned_objects() ||
 79       (hr->is_humongous() && hr->humongous_start_region()->has_pinned_objects());
 80 }
 81 
// Classify a single heap region for the Full GC prepare phase: either queue it
// as a compaction target, skip it (pinned or high live ratio), or reclaim it
// (dead humongous). Always returns false so region iteration continues.
inline bool G1DetermineCompactionQueueClosure::do_heap_region(HeapRegion* hr) {
  if (should_compact(hr)) {
    assert(!hr->is_humongous(), "moving humongous objects not supported.");
    add_to_compaction_queue(hr);
    return false;
  }

  // Region will not be compacted; decide how it is skipped.
  assert(hr->containing_set() == nullptr, "already cleared by PrepareRegionsClosure");
  if (has_pinned_objects(hr)) {
    // First check regions with pinned objects: they need to be skipped regardless
    // of region type and never be considered for reclamation.
    assert(_collector->is_skip_compacting(hr->hrm_index()), "pinned region %u must be skip_compacting", hr->hrm_index());
    log_trace(gc, phases)("Phase 2: skip compaction region index: %u (%s), has pinned objects",
                          hr->hrm_index(), hr->get_short_type_str());
  } else if (hr->is_humongous()) {
    // A humongous object is live iff the object starting at the start region's
    // bottom is marked; an unmarked object means the whole region set is dead.
    oop obj = cast_to_oop(hr->humongous_start_region()->bottom());
    bool is_empty = !_collector->mark_bitmap()->is_marked(obj);
    if (is_empty) {
      free_empty_humongous_region(hr);
    } else {
      // Live humongous objects are left in place; record their presence.
      _collector->set_has_humongous();
    }
  } else {
    // should_compact() returned false for a regular region, which only happens
    // when the live-words threshold filtered it out.
    assert(MarkSweepDeadRatio > 0,
           "only skip compaction for other regions when MarkSweepDeadRatio > 0");

    // Too many live objects in the region; skip compacting it.
    _collector->update_from_compacting_to_skip_compacting(hr->hrm_index());
    log_trace(gc, phases)("Phase 2: skip compaction region index: %u, live words: " SIZE_FORMAT,
                            hr->hrm_index(), _collector->live_words(hr->hrm_index()));
  }

  // Returning false continues the heap-region iteration.
  return false;
}
116 
// Re-prepare a single object for serial compaction: objects already forwarded
// below the dense prefix keep their destination; everything else is forwarded
// (again) via the serial compaction point. Returns the object's size in words
// so the caller can advance to the next object.
inline size_t G1SerialRePrepareClosure::apply(oop obj) {
  if (SlidingForwarding::is_forwarded(obj)) {
    // We skip objects compacted into the first region or
    // into regions not part of the serial compaction point: their existing
    // forwarding (below _dense_prefix_top) stays valid.
    if (cast_from_oop<HeapWord*>(SlidingForwarding::forwardee(obj)) < _dense_prefix_top) {
      return obj->size();
    }
  }

  // Get size and forward.
  size_t size = obj->size();
  _cp->forward(obj, size);

  return size;
}
132 
133 #endif // SHARE_GC_G1_G1FULLGCPREPARETASK_INLINE_HPP