/*
 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"

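// Return the ReferenceType (Soft, Weak, Final, Phantom) of a java.lang.ref.Reference instance.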
static ReferenceType reference_type(oop reference) {
  return InstanceKlass::cast(reference->klass())->reference_type();
}

static const char* reference_type_name(ReferenceType type) {
  switch (type) {
    case REF_SOFT:
      return "Soft";

    case REF_WEAK:
      return "Weak";

    case REF_FINAL:
      return "Final";

    case REF_PHANTOM:
      return "Phantom";

    default:
      ShouldNotReachHere();
      return nullptr;
  }
}

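// Store an oop into a heap oop field; specialized for both uncompressed (oop)
// and compressed (narrowOop) field layouts.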
template <typename T>
static void set_oop_field(T* field, oop value);

template <>
void set_oop_field<oop>(oop* field, oop value) {
  *field = value;
}

template <>
void set_oop_field<narrowOop>(narrowOop* field, oop value) {
  *field = CompressedOops::encode(value);
}

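// Resolve an object through the load reference barrier, but only if it is marked;
// unmarked objects may be dead and are returned unchanged.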
static oop lrb(oop obj) {
  if (obj != nullptr && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
    return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
  } else {
    return obj;
  }
}

template <typename T>
static volatile T* reference_referent_addr(oop reference) {
  return (volatile T*)java_lang_ref_Reference::referent_addr_raw(reference);
}

inline oop reference_coop_decode_raw(narrowOop v) {
  return CompressedOops::is_null(v) ? nullptr : CompressedOops::decode_raw(v);
}

inline oop reference_coop_decode_raw(oop v) {
  return v;
}

// Raw referent: it can be dead, so you cannot treat it as an oop without additional
// safety checks; this is why it is returned as HeapWord*. The decoding uses a
// special-case inlined CompressedOops::decode method that bypasses normal oop-ness checks.
template <typename T>
static HeapWord* reference_referent_raw(oop reference) {
  T raw_oop = Atomic::load(reference_referent_addr<T>(reference));
  return cast_from_oop<HeapWord*>(reference_coop_decode_raw(raw_oop));
}

static void reference_clear_referent(oop reference) {
  java_lang_ref_Reference::clear_referent_raw(reference);
}

template <typename T>
static T* reference_discovered_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
}

template <typename T>
static oop reference_discovered(oop reference) {
  T heap_oop = *reference_discovered_addr<T>(reference);
  return lrb(CompressedOops::decode(heap_oop));
}

template <typename T>
static void reference_set_discovered(oop reference, oop discovered);

template <>
void reference_set_discovered<oop>(oop reference, oop discovered) {
  *reference_discovered_addr<oop>(reference) = discovered;
}

template <>
void reference_set_discovered<narrowOop>(oop reference, oop discovered) {
  *reference_discovered_addr<narrowOop>(reference) = CompressedOops::encode(discovered);
}

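// Atomically install a new value in the reference's discovered field, succeeding
// only if the field is currently null.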
template<typename T>
static bool reference_cas_discovered(oop reference, oop discovered) {
  T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
  return ShenandoahHeap::atomic_update_oop_check(discovered, addr, nullptr);
}

template <typename T>
static T* reference_next_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::next_addr_raw(reference));
}

template <typename T>
static oop reference_next(oop reference) {
  T heap_oop = RawAccess<>::oop_load(reference_next_addr<T>(reference));
  return lrb(CompressedOops::decode(heap_oop));
}

static void reference_set_next(oop reference, oop next) {
  java_lang_ref_Reference::set_next_raw(reference, next);
}

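// Update the SoftReference clock with the current time, in milliseconds.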
static void soft_reference_update_clock() {
  const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  java_lang_ref_SoftReference::set_clock(now);
}

ShenandoahRefProcThreadLocal::ShenandoahRefProcThreadLocal() :
  _discovered_list(nullptr),
  _encountered_count(),
  _discovered_count(),
  _enqueued_count() {
}

void ShenandoahRefProcThreadLocal::reset() {
  _discovered_list = nullptr;
  _mark_closure = nullptr;
  for (uint i = 0; i < reference_type_count; i++) {
    _encountered_count[i] = 0;
    _discovered_count[i] = 0;
    _enqueued_count[i] = 0;
  }
}

template <typename T>
T* ShenandoahRefProcThreadLocal::discovered_list_addr() {
  return reinterpret_cast<T*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<oop>() const {
  return *reinterpret_cast<const oop*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<narrowOop>() const {
  return CompressedOops::decode(*reinterpret_cast<const narrowOop*>(&_discovered_list));
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<narrowOop>(oop head) {
  *discovered_list_addr<narrowOop>() = CompressedOops::encode(head);
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<oop>(oop head) {
  *discovered_list_addr<oop>() = head;
}

ShenandoahReferenceProcessor::ShenandoahReferenceProcessor(uint max_workers) :
  _soft_reference_policy(nullptr),
  _ref_proc_thread_locals(NEW_C_HEAP_ARRAY(ShenandoahRefProcThreadLocal, max_workers, mtGC)),
  _pending_list(nullptr),
  _pending_list_tail(&_pending_list),
  _iterate_discovered_list_id(0U),
  _stats() {
  for (size_t i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

void ShenandoahReferenceProcessor::reset_thread_locals() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

void ShenandoahReferenceProcessor::set_mark_closure(uint worker_id, ShenandoahMarkRefsSuperClosure* mark_closure) {
  _ref_proc_thread_locals[worker_id].set_mark_closure(mark_closure);
}

void ShenandoahReferenceProcessor::set_soft_reference_policy(bool clear) {
  static AlwaysClearPolicy always_clear_policy;
  static LRUMaxHeapPolicy lru_max_heap_policy;

  if (clear) {
    log_info(gc, ref)("Clearing All SoftReferences");
    _soft_reference_policy = &always_clear_policy;
  } else {
    _soft_reference_policy = &lru_max_heap_policy;
  }

  _soft_reference_policy->setup();
}

template <typename T>
bool ShenandoahReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
  if (type == REF_FINAL) {
    // A FinalReference is inactive if its next field is non-null. An application can't
    // call enqueue() or clear() on a FinalReference.
    return reference_next<T>(reference) != nullptr;
  } else {
    // A non-FinalReference is inactive if the referent is null. The referent can only
    // be null if the application called Reference.enqueue() or Reference.clear().
    return referent == nullptr;
  }
}

bool ShenandoahReferenceProcessor::is_strongly_live(oop referent) const {
  return ShenandoahHeap::heap()->marking_context()->is_marked_strong(referent);
}

bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
  if (type != REF_SOFT) {
    // Not a SoftReference
    return false;
  }

  // Ask SoftReference policy
  const jlong clock = java_lang_ref_SoftReference::clock();
  assert(clock != 0, "Clock not initialized");
  assert(_soft_reference_policy != nullptr, "Policy not initialized");
  return !_soft_reference_policy->should_clear_reference(reference, clock);
}

265 
266 template <typename T>
267 bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
268   T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference);
269   T heap_oop = RawAccess<>::oop_load(referent_addr);
270   oop referent = CompressedOops::decode(heap_oop);
271 
272   if (is_inactive<T>(reference, referent, type)) {
273     log_trace(gc,ref)("Reference inactive: " PTR_FORMAT, p2i(reference));
274     return false;
275   }
276 
277   if (is_strongly_live(referent)) {
278     log_trace(gc,ref)("Reference strongly live: " PTR_FORMAT, p2i(reference));
279     return false;
280   }
281 
282   if (is_softly_live(reference, type)) {
283     log_trace(gc,ref)("Reference softly live: " PTR_FORMAT, p2i(reference));
284     return false;
285   }
286 
287   return true;
288 }
289 
290 template <typename T>
291 bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
292   HeapWord* raw_referent = reference_referent_raw<T>(reference);
293   if (raw_referent == nullptr) {
294     // Reference has been cleared, by a call to Reference.enqueue()
295     // or Reference.clear() from the application, which means we
296     // should drop the reference.
297     return true;
298   }
299 
300   // Check if the referent is still alive, in which case we should
301   // drop the reference.
302   if (type == REF_PHANTOM) {
303     return ShenandoahHeap::heap()->complete_marking_context()->is_marked(raw_referent);
304   } else {
305     return ShenandoahHeap::heap()->complete_marking_context()->is_marked_strong(raw_referent);
306   }
307 }
308 
template <typename T>
void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
  if (type == REF_FINAL) {
    // Don't clear referent. It is needed by the Finalizer thread to make the call
    // to finalize(). A FinalReference is instead made inactive by self-looping the
    // next field. An application can't call FinalReference.enqueue(), so there is
    // no race to worry about when setting the next field.
    assert(reference_next<T>(reference) == nullptr, "Already inactive");
    assert(ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent_raw<T>(reference)), "only make inactive final refs with alive referents");
    reference_set_next(reference, reference);
  } else {
    // Clear referent
    reference_clear_referent(reference);
  }
}

template <typename T>
bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, uint worker_id) {
  if (!should_discover<T>(reference, type)) {
    // Not discovered
    return false;
  }

  if (reference_discovered<T>(reference) != nullptr) {
    // Already discovered. This can happen if the reference is first marked finalizable
    // and then strong, in which case it will be seen twice by marking.
    log_trace(gc, ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
    return true;
  }

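  // The referent of a FinalReference must stay reachable for the Finalizer thread,
  // so mark through it now, with the closure temporarily switched to weak strength.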
  if (type == REF_FINAL) {
    ShenandoahMarkRefsSuperClosure* cl = _ref_proc_thread_locals[worker_id].mark_closure();
    bool weak = cl->is_weak();
    cl->set_weak(true);
    if (UseCompressedOops) {
      cl->do_oop(reinterpret_cast<narrowOop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    } else {
      cl->do_oop(reinterpret_cast<oop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    }
    cl->set_weak(weak);
  }

  // Add reference to discovered list
  ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
  oop discovered_head = refproc_data.discovered_list_head<T>();
  if (discovered_head == nullptr) {
    // Self-loop tail of list. We distinguish discovered from not-discovered references
    // by looking at their discovered field: if it is null, then it is not yet discovered;
    // otherwise it is discovered.
    discovered_head = reference;
  }
  if (reference_cas_discovered<T>(reference, discovered_head)) {
    refproc_data.set_discovered_list_head<T>(reference);
    assert(refproc_data.discovered_list_head<T>() == reference, "reference must be new discovered head");
    log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
    _ref_proc_thread_locals[worker_id].inc_discovered(type);
  }
  return true;
}

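// Entry point for reference discovery during marking; counts the encounter and
// dispatches on compressed vs. uncompressed oops.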
bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
  if (!RegisterReferences) {
    // Reference processing disabled
    return false;
  }

  log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
  uint worker_id = WorkerThread::worker_id();
  _ref_proc_thread_locals[worker_id].inc_encountered(type);

  if (UseCompressedOops) {
    return discover<narrowOop>(reference, type, worker_id);
  } else {
    return discover<oop>(reference, type, worker_id);
  }
}

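// Drop a reference whose referent is alive (or already cleared): unlink it from
// the discovered list and return the next element in the list.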
template <typename T>
oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

#ifdef ASSERT
  HeapWord* raw_referent = reference_referent_raw<T>(reference);
  assert(raw_referent == nullptr || ShenandoahHeap::heap()->marking_context()->is_marked(raw_referent),
         "only drop references with alive referents");
#endif

  // Unlink and return next in list
  oop next = reference_discovered<T>(reference);
  reference_set_discovered<T>(reference, nullptr);
  return next;
}

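// Keep a reference for later enqueueing: update statistics, make the reference
// inactive, and return the address of its discovered field, which links to the
// next element in the list.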
template <typename T>
T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint worker_id) {
  log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _ref_proc_thread_locals[worker_id].inc_enqueued(type);

  // Make reference inactive
  make_inactive<T>(reference, type);

  // Return next in list
  return reference_discovered_addr<T>(reference);
}

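// Process one worker-local discovered list: drop references whose referents are
// still alive (or already cleared), keep the rest for enqueueing, and finally
// prepend the surviving sublist onto the shared internal pending list.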
template <typename T>
void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
  log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head<T>()));
  T* list = refproc_data.discovered_list_addr<T>();
  // The list head is effectively a GC root: we need to resolve and update it,
  // otherwise we would later swap a from-space ref into Universe::pending_list().
  if (!CompressedOops::is_null(*list)) {
    oop first_resolved = lrb(CompressedOops::decode_not_null(*list));
    set_oop_field(list, first_resolved);
  }
  T* p = list;
  while (true) {
    const oop reference = lrb(CompressedOops::decode(*p));
    if (reference == nullptr) {
      break;
    }
    log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
    const ReferenceType type = reference_type(reference);

    if (should_drop<T>(reference, type)) {
      set_oop_field(p, drop<T>(reference, type));
    } else {
      p = keep<T>(reference, type, worker_id);
    }

    const oop discovered = lrb(reference_discovered<T>(reference));
    if (reference == discovered) {
      // Reset terminating self-loop to null
      reference_set_discovered<T>(reference, oop(nullptr));
      break;
    }
  }

  // Prepend discovered references to internal pending list
  if (!CompressedOops::is_null(*list)) {
    oop head = lrb(CompressedOops::decode_not_null(*list));
    shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
    oop prev = Atomic::xchg(&_pending_list, head);
    RawAccess<>::oop_store(p, prev);
    if (prev == nullptr) {
      // First to prepend to list, record tail
      _pending_list_tail = reinterpret_cast<void*>(p);
    }

    // Clear discovered list
    set_oop_field(list, oop(nullptr));
  }
}

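// Claim and process worker-local discovered lists, one at a time, by atomically
// incrementing _iterate_discovered_list_id until all lists have been claimed.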
void ShenandoahReferenceProcessor::work() {
  // Process discovered references
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  uint worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  while (worker_id < max_workers) {
    if (UseCompressedOops) {
      process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
    } else {
      process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
    }
    worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  }
}

class ShenandoahReferenceProcessorTask : public WorkerTask {
private:
  bool const                          _concurrent;
  ShenandoahPhaseTimings::Phase const _phase;
  ShenandoahReferenceProcessor* const _reference_processor;

public:
  ShenandoahReferenceProcessorTask(ShenandoahPhaseTimings::Phase phase, bool concurrent, ShenandoahReferenceProcessor* reference_processor) :
    WorkerTask("ShenandoahReferenceProcessorTask"),
    _concurrent(concurrent),
    _phase(phase),
    _reference_processor(reference_processor) {
  }

  virtual void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    }
  }
};

void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {
  Atomic::release_store_fence(&_iterate_discovered_list_id, 0U);

  // Process discovered lists
  ShenandoahReferenceProcessorTask task(phase, concurrent, this);
  workers->run_task(&task);

  // Update SoftReference clock
  soft_reference_update_clock();

  // Collect, log and trace statistics
  collect_statistics();

  enqueue_references(concurrent);
}

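// Splice the internal pending list onto the external java.lang.ref pending list.
// Callers must serialize access to the external list: either by holding the
// Heap_lock (concurrent case) or by running inside a safepoint VM operation.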
void ShenandoahReferenceProcessor::enqueue_references_locked() {
  // Prepend internal pending list to external pending list
  shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
  if (UseCompressedOops) {
    *reinterpret_cast<narrowOop*>(_pending_list_tail) = CompressedOops::encode(Universe::swap_reference_pending_list(_pending_list));
  } else {
    *reinterpret_cast<oop*>(_pending_list_tail) = Universe::swap_reference_pending_list(_pending_list);
  }
}

void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
  if (_pending_list == nullptr) {
    // Nothing to enqueue
    return;
  }

  if (!concurrent) {
    // When called from mark-compact or degen-GC, the locking is done by the VMOperation.
    enqueue_references_locked();
  } else {
    // Heap_lock protects external pending list
    MonitorLocker ml(Heap_lock);

    enqueue_references_locked();

    // Notify ReferenceHandler thread
    ml.notify_all();
  }

  // Reset internal pending list
  _pending_list = nullptr;
  _pending_list_tail = &_pending_list;
}

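// Walk a discovered list and null out each discovered field, dismantling the list.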
template<typename T>
void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
  T discovered = *list;
  while (!CompressedOops::is_null(discovered)) {
    oop discovered_ref = CompressedOops::decode_not_null(discovered);
    set_oop_field<T>(list, oop(nullptr));
    list = reference_discovered_addr<T>(discovered_ref);
    discovered = *list;
  }
}

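// Abandon all partially discovered references: dismantle the worker-local discovered
// lists and any internal pending list without processing or enqueueing them.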
void ShenandoahReferenceProcessor::abandon_partial_discovery() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint index = 0; index < max_workers; index++) {
    if (UseCompressedOops) {
      clean_discovered_list<narrowOop>(_ref_proc_thread_locals[index].discovered_list_addr<narrowOop>());
    } else {
      clean_discovered_list<oop>(_ref_proc_thread_locals[index].discovered_list_addr<oop>());
    }
  }
  if (_pending_list != nullptr) {
    oop pending = _pending_list;
    _pending_list = nullptr;
    if (UseCompressedOops) {
      narrowOop* list = reference_discovered_addr<narrowOop>(pending);
      clean_discovered_list<narrowOop>(list);
    } else {
      oop* list = reference_discovered_addr<oop>(pending);
      clean_discovered_list<oop>(list);
    }
  }
  _pending_list_tail = &_pending_list;
}

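// Sum the per-worker encountered/discovered/enqueued counters, publish the discovery
// totals via ReferenceProcessorStats, and log a per-type summary.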
void ShenandoahReferenceProcessor::collect_statistics() {
  Counters encountered = {};
  Counters discovered = {};
  Counters enqueued = {};
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    for (size_t type = 0; type < reference_type_count; type++) {
      encountered[type] += _ref_proc_thread_locals[i].encountered((ReferenceType)type);
      discovered[type] += _ref_proc_thread_locals[i].discovered((ReferenceType)type);
      enqueued[type] += _ref_proc_thread_locals[i].enqueued((ReferenceType)type);
    }
  }

  _stats = ReferenceProcessorStats(discovered[REF_SOFT],
                                   discovered[REF_WEAK],
                                   discovered[REF_FINAL],
                                   discovered[REF_PHANTOM]);

  log_info(gc, ref)("Encountered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    encountered[REF_SOFT], encountered[REF_WEAK], encountered[REF_FINAL], encountered[REF_PHANTOM]);
  log_info(gc, ref)("Discovered  references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    discovered[REF_SOFT], discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]);
  log_info(gc, ref)("Enqueued    references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]);
}