/*
 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"

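// Fetch the ReferenceType (Soft, Weak, Final or Phantom) recorded in the
// Reference object's InstanceKlass.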
static ReferenceType reference_type(oop reference) {
  return InstanceKlass::cast(reference->klass())->reference_type();
}

static const char* reference_type_name(ReferenceType type) {
  switch (type) {
    case REF_SOFT:
      return "Soft";

    case REF_WEAK:
      return "Weak";

    case REF_FINAL:
      return "Final";

    case REF_PHANTOM:
      return "Phantom";

    default:
      ShouldNotReachHere();
      return nullptr;
  }
}

template <typename T>
static void card_mark_barrier(T* field, oop value) {
  assert(ShenandoahCardBarrier, "Card-mark barrier should be on");
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  assert(heap->is_in_or_null(value), "Should be in heap");
  if (heap->is_in_old(field) && heap->is_in_young(value)) {
    // For Shenandoah, each generation collects all the _referents_ that belong to the
    // collected generation. We can end up with discovered lists that contain a mixture
    // of old and young _references_. These references are linked together through the
    // discovered field in java.lang.Reference. In some cases, creating or editing this
    // list may result in the creation of _new_ old-to-young pointers which must dirty
    // the corresponding card. Failing to do this may cause heap verification errors and
    // lead to incorrect GC behavior.
    heap->old_generation()->mark_card_as_dirty(field);
  }
}

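// Store a (possibly null) oop into a field, applying the card-mark barrier
// when it is enabled. Specialized below for both compressed and uncompressed oops.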
template <typename T>
static void set_oop_field(T* field, oop value);

template <>
void set_oop_field<oop>(oop* field, oop value) {
  *field = value;
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

template <>
void set_oop_field<narrowOop>(narrowOop* field, oop value) {
  *field = CompressedOops::encode(value);
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

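// Resolve an object through the load reference barrier, so that we see its
// to-space copy if it has been evacuated. Objects that are not marked are
// returned as-is.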
static oop lrb(oop obj) {
  if (obj != nullptr && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
    return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
  } else {
    return obj;
  }
}

template <typename T>
static volatile T* reference_referent_addr(oop reference) {
  return (volatile T*)java_lang_ref_Reference::referent_addr_raw(reference);
}

inline oop reference_coop_decode_raw(narrowOop v) {
  return CompressedOops::is_null(v) ? nullptr : CompressedOops::decode_raw(v);
}

inline oop reference_coop_decode_raw(oop v) {
  return v;
}

// Raw referent: it may be dead, so it cannot be treated as an oop without additional
// safety checks, which is why it is returned as a HeapWord*. The decoding uses a
// special-case inlined CompressedOops::decode method that bypasses normal oop-ness checks.
template <typename T>
static HeapWord* reference_referent_raw(oop reference) {
  T raw_oop = Atomic::load(reference_referent_addr<T>(reference));
  return cast_from_oop<HeapWord*>(reference_coop_decode_raw(raw_oop));
}

static void reference_clear_referent(oop reference) {
  java_lang_ref_Reference::clear_referent_raw(reference);
}

template <typename T>
static T* reference_discovered_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
}

template <typename T>
static oop reference_discovered(oop reference) {
  T heap_oop = *reference_discovered_addr<T>(reference);
  return lrb(CompressedOops::decode(heap_oop));
}

template <typename T>
static void reference_set_discovered(oop reference, oop discovered);

template <>
void reference_set_discovered<oop>(oop reference, oop discovered) {
  *reference_discovered_addr<oop>(reference) = discovered;
}

template <>
void reference_set_discovered<narrowOop>(oop reference, oop discovered) {
  *reference_discovered_addr<narrowOop>(reference) = CompressedOops::encode(discovered);
}

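// Atomically install 'discovered' into the reference's discovered field, expecting
// the field to be null. Returns false if another thread got there first, i.e. the
// reference has already been claimed for discovery.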
template<typename T>
static bool reference_cas_discovered(oop reference, oop discovered) {
  T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
  return ShenandoahHeap::atomic_update_oop_check(discovered, addr, nullptr);
}

template <typename T>
static T* reference_next_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::next_addr_raw(reference));
}

template <typename T>
static oop reference_next(oop reference) {
  T heap_oop = RawAccess<>::oop_load(reference_next_addr<T>(reference));
  return lrb(CompressedOops::decode(heap_oop));
}

static void reference_set_next(oop reference, oop next) {
  java_lang_ref_Reference::set_next_raw(reference, next);
}

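// Update the SoftReference clock to the current time, in milliseconds since the
// epoch. SoftReference clearing policies compare timestamps against this clock.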
static void soft_reference_update_clock() {
  const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  java_lang_ref_SoftReference::set_clock(now);
}

ShenandoahRefProcThreadLocal::ShenandoahRefProcThreadLocal() :
  _discovered_list(nullptr),
  _encountered_count(),
  _discovered_count(),
  _enqueued_count() {
}

void ShenandoahRefProcThreadLocal::reset() {
  _discovered_list = nullptr;
  _mark_closure = nullptr;
  for (uint i = 0; i < reference_type_count; i++) {
    _encountered_count[i] = 0;
    _discovered_count[i] = 0;
    _enqueued_count[i] = 0;
  }
}

template <typename T>
T* ShenandoahRefProcThreadLocal::discovered_list_addr() {
  return reinterpret_cast<T*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<oop>() const {
  return *reinterpret_cast<const oop*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<narrowOop>() const {
  return CompressedOops::decode(*reinterpret_cast<const narrowOop*>(&_discovered_list));
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<narrowOop>(oop head) {
  *discovered_list_addr<narrowOop>() = CompressedOops::encode(head);
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<oop>(oop head) {
  *discovered_list_addr<oop>() = head;
}

ShenandoahReferenceProcessor::ShenandoahReferenceProcessor(uint max_workers) :
  _soft_reference_policy(nullptr),
  _ref_proc_thread_locals(NEW_C_HEAP_ARRAY(ShenandoahRefProcThreadLocal, max_workers, mtGC)),
  _pending_list(nullptr),
  _pending_list_tail(&_pending_list),
  _iterate_discovered_list_id(0U),
  _stats() {
  for (size_t i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

void ShenandoahReferenceProcessor::reset_thread_locals() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

void ShenandoahReferenceProcessor::set_mark_closure(uint worker_id, ShenandoahMarkRefsSuperClosure* mark_closure) {
  _ref_proc_thread_locals[worker_id].set_mark_closure(mark_closure);
}

void ShenandoahReferenceProcessor::set_soft_reference_policy(bool clear) {
  static AlwaysClearPolicy always_clear_policy;
  static LRUMaxHeapPolicy lru_max_heap_policy;

  if (clear) {
    log_info(gc, ref)("Clearing All SoftReferences");
    _soft_reference_policy = &always_clear_policy;
  } else {
    _soft_reference_policy = &lru_max_heap_policy;
  }

  _soft_reference_policy->setup();
}

template <typename T>
bool ShenandoahReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
  if (type == REF_FINAL) {
    // A FinalReference is inactive if its next field is non-null. An application can't
    // call enqueue() or clear() on a FinalReference.
    return reference_next<T>(reference) != nullptr;
  } else {
    // A non-FinalReference is inactive if the referent is null. The referent can only
    // be null if the application called Reference.enqueue() or Reference.clear().
    return referent == nullptr;
  }
}

bool ShenandoahReferenceProcessor::is_strongly_live(oop referent) const {
  return ShenandoahHeap::heap()->marking_context()->is_marked_strong(referent);
}

bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
  if (type != REF_SOFT) {
    // Not a SoftReference
    return false;
  }

  // Ask the SoftReference policy
  const jlong clock = java_lang_ref_SoftReference::clock();
  assert(clock != 0, "Clock not initialized");
  assert(_soft_reference_policy != nullptr, "Policy not initialized");
  return !_soft_reference_policy->should_clear_reference(reference, clock);
}

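// A reference is a candidate for discovery only if it is still active, its referent
// is neither strongly nor softly live, and the referent is in the active (collected)
// generation.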
template <typename T>
bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
  T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference);
  T heap_oop = RawAccess<>::oop_load(referent_addr);
  oop referent = CompressedOops::decode(heap_oop);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (is_inactive<T>(reference, referent, type)) {
    log_trace(gc, ref)("Reference inactive: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (is_strongly_live(referent)) {
    log_trace(gc, ref)("Reference strongly live: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (is_softly_live(reference, type)) {
    log_trace(gc, ref)("Reference softly live: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (!heap->is_in_active_generation(referent)) {
    log_trace(gc, ref)("Referent outside of active generation: " PTR_FORMAT, p2i(referent));
    return false;
  }

  return true;
}

template <typename T>
bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
  HeapWord* raw_referent = reference_referent_raw<T>(reference);
  if (raw_referent == nullptr) {
    // The reference has been cleared by a call to Reference.enqueue()
    // or Reference.clear() from the application, which means we
    // should drop the reference.
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  // Check if the referent is still alive, in which case we should
  // drop the reference.
  if (type == REF_PHANTOM) {
    return heap->active_generation()->complete_marking_context()->is_marked(raw_referent);
  } else {
    return heap->active_generation()->complete_marking_context()->is_marked_strong(raw_referent);
  }
}

template <typename T>
void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
  if (type == REF_FINAL) {
    // Don't clear the referent. It is needed by the Finalizer thread to make the call
    // to finalize(). A FinalReference is instead made inactive by self-looping the
    // next field. An application can't call FinalReference.enqueue(), so there is
    // no race to worry about when setting the next field.
    assert(reference_next<T>(reference) == nullptr, "Already inactive");
    assert(ShenandoahHeap::heap()->active_generation()->complete_marking_context()->is_marked(reference_referent_raw<T>(reference)), "only make inactive final refs with alive referents");
    reference_set_next(reference, reference);
  } else {
    // Clear the referent
    reference_clear_referent(reference);
  }
}

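// Attempt to discover a reference encountered during marking. Each worker discovers
// into its own thread-local list, so list insertion itself is uncontended; the CAS on
// the discovered field only arbitrates between workers that encounter the same
// reference concurrently.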
template <typename T>
bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, uint worker_id) {
  if (!should_discover<T>(reference, type)) {
    // Not discovered
    return false;
  }

  if (reference_discovered<T>(reference) != nullptr) {
    // Already discovered. This can happen if the reference is first marked finalizable and
    // then strong, in which case it will be seen twice by marking.
    log_trace(gc, ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
    return true;
  }

  if (type == REF_FINAL) {
    ShenandoahMarkRefsSuperClosure* cl = _ref_proc_thread_locals[worker_id].mark_closure();
    bool weak = cl->is_weak();
    cl->set_weak(true);
    if (UseCompressedOops) {
      cl->do_oop(reinterpret_cast<narrowOop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    } else {
      cl->do_oop(reinterpret_cast<oop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    }
    cl->set_weak(weak);
  }

  // Add the reference to the discovered list.
  // Each worker thread has a private copy of refproc_data, which includes a private discovered
  // list. This means there is no risk that a different worker thread will try to manipulate my
  // discovered list head while I am making this reference the head of my discovered list.
  ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
  oop discovered_head = refproc_data.discovered_list_head<T>();
  if (discovered_head == nullptr) {
    // Self-loop the tail of the list. We distinguish discovered from not-discovered references
    // by looking at their discovered field: if it is null, the reference is not yet discovered;
    // otherwise it is discovered.
    discovered_head = reference;
  }
  if (reference_cas_discovered<T>(reference, discovered_head)) {
    // We successfully set this reference object's next pointer to discovered_head, which marks
    // the reference as discovered. If reference_cas_discovered fails, some other worker thread
    // took credit for discovery of this reference, and that thread will place the reference on
    // its own discovered list, so I can ignore this reference.

    // In case we have created an interesting pointer, mark the remembered set card as dirty.
    if (ShenandoahCardBarrier) {
      T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
      card_mark_barrier(addr, discovered_head);
    }

    // Make the discovered_list_head point to the reference.
    refproc_data.set_discovered_list_head<T>(reference);
    assert(refproc_data.discovered_list_head<T>() == reference, "reference must be new discovered head");
    log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
    _ref_proc_thread_locals[worker_id].inc_discovered(type);
  }
  return true;
}

bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
  if (!RegisterReferences) {
    // Reference processing disabled
    return false;
  }

  log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s, %s)",
                     p2i(reference), reference_type_name(type), ShenandoahHeap::heap()->heap_region_containing(reference)->affiliation_name());
  uint worker_id = WorkerThread::worker_id();
  _ref_proc_thread_locals[worker_id].inc_encountered(type);

  if (UseCompressedOops) {
    return discover<narrowOop>(reference, type, worker_id);
  } else {
    return discover<oop>(reference, type, worker_id);
  }
}

template <typename T>
oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  HeapWord* referent = reference_referent_raw<T>(reference);
  assert(referent == nullptr || heap->marking_context()->is_marked(referent), "only drop references with alive referents");

  // Unlink and return the next reference in the list.
  oop next = reference_discovered<T>(reference);
  reference_set_discovered<T>(reference, nullptr);
  // When this reference was discovered, it would not have been marked. If it ends up surviving
  // the cycle, we need to dirty the card if the reference is old and the referent is young. Note
  // that if the reference is not dropped, then its pointer to the referent will be nulled before
  // evacuation begins, so the card does not need to be dirtied.
  if (ShenandoahCardBarrier) {
    card_mark_barrier(cast_from_oop<HeapWord*>(reference), cast_to_oop(referent));
  }
  return next;
}

template <typename T>
T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint worker_id) {
  log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _ref_proc_thread_locals[worker_id].inc_enqueued(type);

  // Make the reference inactive
  make_inactive<T>(reference, type);

  // Return the next reference in the list
  return reference_discovered_addr<T>(reference);
}

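// Process one worker's discovered list: drop references whose referents are still
// alive (or were cleared by the application) and keep the rest, making them inactive.
// Discovered lists are singly linked through the discovered field and terminated by a
// self-loop, for example:
//
//   _discovered_list -> R1 -> R2 -> R3 -+
//                                  ^    |
//                                  +----+
//
// Whatever survives is prepended, as a batch, onto the shared _pending_list.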
template <typename T>
void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
  log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head<T>()));
  T* list = refproc_data.discovered_list_addr<T>();
  // The list head is basically a GC root; we need to resolve and update it,
  // otherwise we will later swap a from-space ref into Universe::pending_list().
  if (!CompressedOops::is_null(*list)) {
    oop first_resolved = lrb(CompressedOops::decode_not_null(*list));
    set_oop_field(list, first_resolved);
  }
  T* p = list;
  while (true) {
    const oop reference = lrb(CompressedOops::decode(*p));
    if (reference == nullptr) {
      break;
    }
    log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
    const ReferenceType type = reference_type(reference);

    if (should_drop<T>(reference, type)) {
      set_oop_field(p, drop<T>(reference, type));
    } else {
      p = keep<T>(reference, type, worker_id);
    }

    const oop discovered = lrb(reference_discovered<T>(reference));
    if (reference == discovered) {
      // Reset the terminating self-loop to null
      reference_set_discovered<T>(reference, oop(nullptr));
      break;
    }
  }

  // Prepend discovered references to the internal pending list.
  // set_oop_field maintains the card-mark barrier as this list is constructed.
  if (!CompressedOops::is_null(*list)) {
    oop head = lrb(CompressedOops::decode_not_null(*list));
    shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
    oop prev = Atomic::xchg(&_pending_list, head);
    set_oop_field(p, prev);
    if (prev == nullptr) {
      // First to prepend to the list; record the tail
      _pending_list_tail = reinterpret_cast<void*>(p);
    }

    // Clear the discovered list
    set_oop_field(list, oop(nullptr));
  }
}

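// Workers claim discovered lists by atomically incrementing the shared
// _iterate_discovered_list_id. Each claimed id selects one thread-local list, so
// every list is processed exactly once regardless of how many workers run.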
void ShenandoahReferenceProcessor::work() {
  // Process discovered references
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  uint worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  while (worker_id < max_workers) {
    if (UseCompressedOops) {
      process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
    } else {
      process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
    }
    worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  }
}

class ShenandoahReferenceProcessorTask : public WorkerTask {
private:
  bool const                          _concurrent;
  ShenandoahPhaseTimings::Phase const _phase;
  ShenandoahReferenceProcessor* const _reference_processor;

public:
  ShenandoahReferenceProcessorTask(ShenandoahPhaseTimings::Phase phase, bool concurrent, ShenandoahReferenceProcessor* reference_processor) :
    WorkerTask("ShenandoahReferenceProcessorTask"),
    _concurrent(concurrent),
    _phase(phase),
    _reference_processor(reference_processor) {
  }

  virtual void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    }
  }
};

void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {
  Atomic::release_store_fence(&_iterate_discovered_list_id, 0U);

  // Process discovered lists
  ShenandoahReferenceProcessorTask task(phase, concurrent, this);
  workers->run_task(&task);

  // Update the SoftReference clock
  soft_reference_update_clock();

  // Collect, log and trace statistics
  collect_statistics();

  enqueue_references(concurrent);
}


void ShenandoahReferenceProcessor::enqueue_references_locked() {
  // Prepend the internal pending list to the external pending list
  shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);

  // During reference processing, we maintain a local list of references that are identified by
  // _pending_list and _pending_list_tail. _pending_list_tail points to the next field of the last
  // Reference object on the local list.
  //
  // There is also a global list of references identified by Universe::_reference_pending_list.
  //
  // The following code has the effect of:
  // 1. Making the global Universe::_reference_pending_list point to my local list.
  // 2. Overwriting the next field of the last Reference on my local list to point at the previous
  //    head of the global Universe::_reference_pending_list.

  oop former_head_of_global_list = Universe::swap_reference_pending_list(_pending_list);
  if (UseCompressedOops) {
    set_oop_field<narrowOop>(reinterpret_cast<narrowOop*>(_pending_list_tail), former_head_of_global_list);
  } else {
    set_oop_field<oop>(reinterpret_cast<oop*>(_pending_list_tail), former_head_of_global_list);
  }
}

void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
  if (_pending_list == nullptr) {
    // Nothing to enqueue
    return;
  }
  if (!concurrent) {
    // When called from mark-compact or degenerated GC, the locking is done by the VMOperation.
    enqueue_references_locked();
  } else {
    // Heap_lock protects the external pending list
    MonitorLocker ml(Heap_lock);

    enqueue_references_locked();

    // Notify the ReferenceHandler thread
    ml.notify_all();
  }

  // Reset the internal pending list
  _pending_list = nullptr;
  _pending_list_tail = &_pending_list;
}

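// Walk a discovered list, clearing each reference's discovered field. Because each
// field is cleared before advancing to the next reference, the terminating self-loop
// is untied as a side effect and the walk terminates.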
template<typename T>
void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
  T discovered = *list;
  while (!CompressedOops::is_null(discovered)) {
    oop discovered_ref = CompressedOops::decode_not_null(discovered);
    set_oop_field<T>(list, oop(nullptr));
    list = reference_discovered_addr<T>(discovered_ref);
    discovered = *list;
  }
}

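// Drop all partially discovered references, e.g. when a cycle is abandoned before
// reference processing completes, so that stale discovered state does not leak into
// the next cycle.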
void ShenandoahReferenceProcessor::abandon_partial_discovery() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint index = 0; index < max_workers; index++) {
    if (UseCompressedOops) {
      clean_discovered_list<narrowOop>(_ref_proc_thread_locals[index].discovered_list_addr<narrowOop>());
    } else {
      clean_discovered_list<oop>(_ref_proc_thread_locals[index].discovered_list_addr<oop>());
    }
  }
  if (_pending_list != nullptr) {
    oop pending = _pending_list;
    _pending_list = nullptr;
    if (UseCompressedOops) {
      narrowOop* list = reference_discovered_addr<narrowOop>(pending);
      clean_discovered_list<narrowOop>(list);
    } else {
      oop* list = reference_discovered_addr<oop>(pending);
      clean_discovered_list<oop>(list);
    }
  }
  _pending_list_tail = &_pending_list;
}

void ShenandoahReferenceProcessor::collect_statistics() {
  Counters encountered = {};
  Counters discovered = {};
  Counters enqueued = {};
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    for (size_t type = 0; type < reference_type_count; type++) {
      encountered[type] += _ref_proc_thread_locals[i].encountered((ReferenceType)type);
      discovered[type] += _ref_proc_thread_locals[i].discovered((ReferenceType)type);
      enqueued[type] += _ref_proc_thread_locals[i].enqueued((ReferenceType)type);
    }
  }

  _stats = ReferenceProcessorStats(discovered[REF_SOFT],
                                   discovered[REF_WEAK],
                                   discovered[REF_FINAL],
                                   discovered[REF_PHANTOM]);

  log_info(gc, ref)("Encountered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    encountered[REF_SOFT], encountered[REF_WEAK], encountered[REF_FINAL], encountered[REF_PHANTOM]);
  log_info(gc, ref)("Discovered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    discovered[REF_SOFT], discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]);
  log_info(gc, ref)("Enqueued references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]);
}