1 /*
  2  * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
 27 #define SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
 28 
 29 #include "gc/shenandoah/shenandoahMark.hpp"
 30 
 31 #include "gc/shared/continuationGCSupport.inline.hpp"
 32 #include "gc/shenandoah/shenandoahAsserts.hpp"
 33 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 35 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 36 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 37 #include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
 38 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 39 #include "gc/shenandoah/shenandoahUtils.hpp"
 40 #include "memory/iterator.inline.hpp"
 41 #include "oops/compressedOops.inline.hpp"
 42 #include "oops/oop.inline.hpp"
 43 #include "runtime/prefetch.inline.hpp"
 44 #include "utilities/devirtualizer.inline.hpp"
 45 #include "utilities/powerOfTwo.hpp"
 46 
 47 template <StringDedupMode STRING_DEDUP>
 48 void ShenandoahMark::dedup_string(oop obj, StringDedup::Requests* const req) {
 49   if (STRING_DEDUP == ENQUEUE_DEDUP) {
 50     if (ShenandoahStringDedup::is_candidate(obj)) {
 51       req->add(obj);
 52     }
 53   } else if (STRING_DEDUP == ALWAYS_DEDUP) {
 54     if (ShenandoahStringDedup::is_string_candidate(obj) &&
 55         !ShenandoahStringDedup::dedup_requested(obj)) {
 56         req->add(obj);
 57     }
 58   }
 59 }
 60 
template <class T, ShenandoahGenerationType GENERATION, StringDedupMode STRING_DEDUP>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task, uint worker_id) {
  // Process a single mark task: scan the reference fields of the task's object
  // with closure 'cl', which pushes newly marked objects back onto queue 'q'.
  // Large object arrays arrive (and are re-split) as chunked sub-tasks.
  // Liveness for the object is accumulated into the per-worker 'live_data'.
  oop obj = task->obj();

  // TODO: This will push array chunks into the mark queue with no regard for
  // generations. I don't think it will break anything, but the young generation
  // scan might end up processing some old generation array chunks.

  shenandoah_assert_not_forwarded(nullptr, obj);
  shenandoah_assert_marked(nullptr, obj);
  shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());

  // Are we in weak subgraph scan?
  bool weak = task->is_weak();
  cl->set_weak(weak);

  if (task->is_not_chunked()) {
    if (obj->is_instance()) {
      // Case 1: Normal oop, process as usual.
      if (ContinuationGCSupport::relativize_stack_chunk(obj)) {
          // Loom doesn't support mixing of weak marking and strong marking of
          // stack chunks.
          cl->set_weak(false);
      }

      obj->oop_iterate(cl);
      dedup_string<STRING_DEDUP>(obj, req);
    } else if (obj->is_objArray()) {
      // Case 2: Object array instance and no chunk is set. Must be the first
      // time we visit it, start the chunked processing.
      do_chunked_array_start<T>(q, cl, obj, weak);
    } else {
      // Case 3: Primitive array. Do nothing, no oops there. We use the same
      // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
      // We skip iterating over the klass pointer since we know that
      // Universe::TypeArrayKlass never moves.
      assert (obj->is_typeArray(), "should be type array");
    }
    // Count liveness the last: push the outstanding work to the queues first
    // Avoid double-counting objects that are visited twice due to upgrade
    // from final- to strong mark.
    if (task->count_liveness()) {
      count_liveness<GENERATION>(live_data, obj, worker_id);
    }
  } else {
    // Case 4: Array chunk, has sensible chunk id. Process it.
    // Chunked tasks never count liveness: the whole array was (or will be)
    // counted when the start task ran.
    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
  }
}
110 
template <ShenandoahGenerationType GENERATION>
inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj, uint worker_id) {
  // Accumulate the size of 'obj' into the per-worker liveness counter of its
  // heap region. Regular regions use a small per-worker counter that is
  // flushed to the region on overflow; humongous objects are accounted
  // directly against every region in their chain.
  const ShenandoahHeap* const heap = ShenandoahHeap::heap();
  const size_t region_idx = heap->heap_region_index_containing(obj);
  ShenandoahHeapRegion* const region = heap->get_region(region_idx);
  // Object size in heap words.
  const size_t size = obj->size();

  // Age census for objects in the young generation
  if (GENERATION == YOUNG || (GENERATION == GLOBAL && region->is_young())) {
    assert(heap->mode()->is_generational(), "Only if generational");
    if (ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
      assert(region->is_young(), "Only for young objects");
      uint age = ShenandoahHeap::get_object_age(obj);
      // CENSUS_NOISE variant additionally records region youth as noise data.
      CENSUS_NOISE(heap->age_census()->add(age, region->age(), region->youth(), size, worker_id);)
      NO_CENSUS_NOISE(heap->age_census()->add(age, region->age(), size, worker_id);)
    }
  }

  if (!region->is_humongous_start()) {
    assert(!region->is_humongous(), "Cannot have continuations here");
    assert(region->is_affiliated(), "Do not count live data within Free Regular Region " SIZE_FORMAT, region_idx);
    ShenandoahLiveData cur = live_data[region_idx];
    size_t new_val = size + cur;
    if (new_val >= SHENANDOAH_LIVEDATA_MAX) {
      // overflow, flush to region data
      region->increase_live_data_gc_words(new_val);
      live_data[region_idx] = 0;
    } else {
      // still good, remember in locals
      live_data[region_idx] = (ShenandoahLiveData) new_val;
    }
  } else {
    // Humongous start region: attribute each region's full used space as live,
    // covering the start region and all continuation regions in the chain.
    shenandoah_assert_in_correct_region(nullptr, obj);
    size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);

    assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region " SIZE_FORMAT, region_idx);
    for (size_t i = region_idx; i < region_idx + num_regions; i++) {
      ShenandoahHeapRegion* chain_reg = heap->get_region(i);
      assert(chain_reg->is_humongous(), "Expecting a humongous region");
      assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region " SIZE_FORMAT, i);
      chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
    }
  }
}
155 
template <class T>
inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
  // First visit of an object array: scan its klass metadata, then either
  // process a small array inline or split a large one into power-of-two
  // sized chunk tasks that are pushed back onto the queue.
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  // Mark objArray klass metadata
  if (Devirtualizer::do_metadata(cl)) {
    Devirtualizer::do_klass(cl, array->klass());
  }

  if (len <= (int) ObjArrayMarkingStride*2) {
    // A few slices only, process directly
    array->oop_iterate_range(cl, 0, len);
  } else {
    // 'bits' is the smallest power such that (1 << bits) covers the array.
    int bits = log2i_graceful(len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
    // boundaries against array->length(), touching the array header on every chunk.
    //
    // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
    // If the array is not divided in chunk sizes, then there would be an irregular tail,
    // which we will process separately.

    int last_idx = 0;

    int chunk = 1;
    int pow = bits;

    // Handle overflow: (1 << 31) would overflow a signed int, so immediately
    // split the array in half and push the right half as a task.
    if (pow >= 31) {
      assert (pow == 31, "sanity");
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, true, weak, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ShenandoahMarkTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
    while ((1 << pow) > (int)ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        // Left half is fully inside the array: push it, descend into the right.
        bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        // Right half starts beyond the array: descend into the left half only.
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_range(cl, from, len);
    }
  }
}
222 
template <class T>
inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
  // Process one chunk of an object array. 'chunk' is a 1-based chunk index at
  // granularity (1 << pow) elements; while the chunk is still large, split it
  // in half, pushing the right half as a new task and descending into the left.
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  assert (ObjArrayMarkingStride > 0, "sanity");

  // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
  while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    // 'chunk' is now the right half; push the left half (chunk - 1) as a task.
    bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

  int chunk_size = 1 << pow;

  // Chunks pushed by do_chunked_array_start() are guaranteed to be fully
  // within the array, so no clamping against array->length() is needed.
  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
  assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
#endif

  array->oop_iterate_range(cl, from, to);
}
252 
253 template <ShenandoahGenerationType GENERATION>
254 class ShenandoahSATBBufferClosure : public SATBBufferClosure {
255 private:
256   ShenandoahObjToScanQueue* _queue;
257   ShenandoahObjToScanQueue* _old_queue;
258   ShenandoahHeap* _heap;
259   ShenandoahMarkingContext* const _mark_context;
260 public:
261   ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q) :
262     _queue(q),
263     _old_queue(old_q),
264     _heap(ShenandoahHeap::heap()),
265     _mark_context(_heap->marking_context())
266   {
267   }
268 
269   void do_buffer(void **buffer, size_t size) {
270     assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
271     for (size_t i = 0; i < size; ++i) {
272       oop *p = (oop *) &buffer[i];
273       ShenandoahMark::mark_through_ref<oop, GENERATION>(p, _queue, _old_queue, _mark_context, false);
274     }
275   }
276 };
277 
278 template<ShenandoahGenerationType GENERATION>
279 bool ShenandoahMark::in_generation(ShenandoahHeap* const heap, oop obj) {
280   // Each in-line expansion of in_generation() resolves GENERATION at compile time.
281   if (GENERATION == YOUNG) {
282     return heap->is_in_young(obj);
283   } else if (GENERATION == OLD) {
284     return heap->is_in_old(obj);
285   } else if (GENERATION == GLOBAL || GENERATION == NON_GEN) {
286     return true;
287   } else {
288     return false;
289   }
290 }
291 
template<class T, ShenandoahGenerationType GENERATION>
inline void ShenandoahMark::mark_through_ref(T *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
  // Load the reference at 'p' and, if non-null, mark the referent and push it
  // for scanning: onto 'q' when it is in the GENERATION being marked, onto
  // 'old_q' (if provided) otherwise. Also dirties remembered-set cards for
  // old->young pointers discovered along the way.
  //
  // Note: This is a very hot code path, so the code should be conditional on GENERATION template
  // parameter where possible, in order to generate the most efficient code.

  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    shenandoah_assert_not_forwarded(p, obj);
    shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc());
    if (in_generation<GENERATION>(heap, obj)) {
      mark_ref(q, mark_context, weak, obj);
      shenandoah_assert_marked(p, obj);
      // TODO: As implemented herein, GLOBAL collections reconstruct the card table during GLOBAL concurrent
      // marking. Note that the card table is cleaned at init_mark time so it needs to be reconstructed to support
      // future young-gen collections.  It might be better to reconstruct card table in
      // ShenandoahHeapRegion::global_oop_iterate_and_fill_dead.  We could either mark all live memory as dirty, or could
      // use the GLOBAL update-refs scanning of pointers to determine precisely which cards to flag as dirty.
      if (GENERATION == YOUNG && heap->is_in_old(p)) {
        // Mark card as dirty because remembered set scanning still finds interesting pointer.
        heap->mark_card_as_dirty((HeapWord*)p);
      } else if (GENERATION == GLOBAL && heap->is_in_old(p) && heap->is_in_young(obj)) {
        // Mark card as dirty because GLOBAL marking finds interesting pointer.
        heap->mark_card_as_dirty((HeapWord*)p);
      }
    } else if (old_q != nullptr) {
      // Young mark, bootstrapping old_q or concurrent with old_q marking.
      mark_ref(old_q, mark_context, weak, obj);
      shenandoah_assert_marked(p, obj);
    } else if (GENERATION == OLD) {
      // Old mark, found a young pointer.
      // TODO:  Rethink this: may be redundant with dirtying of cards identified during young-gen remembered set scanning
      // and by mutator write barriers.  Assert
      if (heap->is_in(p)) {
        assert(heap->is_in_young(obj), "Expected young object.");
        heap->mark_card_as_dirty(p);
      }
    }
  }
}
334 
335 inline void ShenandoahMark::mark_ref(ShenandoahObjToScanQueue* q,
336                               ShenandoahMarkingContext* const mark_context,
337                               bool weak, oop obj) {
338   bool skip_live = false;
339   bool marked;
340   if (weak) {
341     marked = mark_context->mark_weak(obj);
342   } else {
343     marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live);
344   }
345   if (marked) {
346     bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak));
347     assert(pushed, "overflow queue should always succeed pushing");
348   }
349 }
350 
// Returns the set of per-worker mark queues.
ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const {
  return _task_queues;
}
354 
// Returns the mark queue assigned to worker 'index'.
ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const {
  return _task_queues->queue(index);
}
358 
359 ShenandoahObjToScanQueue* ShenandoahMark::get_old_queue(uint index) const {
360   if (_old_gen_task_queues != nullptr) {
361     return _old_gen_task_queues->queue(index);
362   }
363   return nullptr;
364 }
365 
366 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
--- EOF ---