1 /*
2 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "gc/shared/workerThread.hpp"
29 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
30 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
31 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
32 #include "gc/shenandoah/shenandoahUtils.hpp"
33 #include "runtime/atomic.hpp"
34 #include "logging/log.hpp"
35
36 static ReferenceType reference_type(oop reference) {
37 return InstanceKlass::cast(reference->klass())->reference_type();
38 }
39
40 static const char* reference_type_name(ReferenceType type) {
41 switch (type) {
42 case REF_SOFT:
43 return "Soft";
44
45 case REF_WEAK:
46 return "Weak";
47
48 case REF_FINAL:
49 return "Final";
50
51 case REF_PHANTOM:
52 return "Phantom";
53
54 default:
55 ShouldNotReachHere();
56 return nullptr;
57 }
58 }
59
// Raw store of an oop value into a (possibly compressed) reference field.
// The narrowOop specialization encodes the value before storing it.
template <typename T>
static void set_oop_field(T* field, oop value);

template <>
void set_oop_field<oop>(oop* field, oop value) {
  *field = value;
}

template <>
void set_oop_field<narrowOop>(narrowOop* field, oop value) {
  // Compressed-oop fields hold the encoded form of the value.
  *field = CompressedOops::encode(value);
}
72
73 static oop lrb(oop obj) {
74 if (obj != nullptr && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
75 return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
76 } else {
77 return obj;
78 }
79 }
80
81 template <typename T>
82 static volatile T* reference_referent_addr(oop reference) {
83 return (volatile T*)java_lang_ref_Reference::referent_addr_raw(reference);
84 }
85
86 template <typename T>
87 static oop reference_referent(oop reference) {
88 T heap_oop = Atomic::load(reference_referent_addr<T>(reference));
89 return CompressedOops::decode(heap_oop);
90 }
240 }
241
242 bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
243 if (type != REF_SOFT) {
244 // Not a SoftReference
245 return false;
246 }
247
248 // Ask SoftReference policy
249 const jlong clock = java_lang_ref_SoftReference::clock();
250 assert(clock != 0, "Clock not initialized");
251 assert(_soft_reference_policy != nullptr, "Policy not initialized");
252 return !_soft_reference_policy->should_clear_reference(reference, clock);
253 }
254
255 template <typename T>
256 bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
257 T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference);
258 T heap_oop = RawAccess<>::oop_load(referent_addr);
259 oop referent = CompressedOops::decode(heap_oop);
260
261 if (is_inactive<T>(reference, referent, type)) {
262 log_trace(gc,ref)("Reference inactive: " PTR_FORMAT, p2i(reference));
263 return false;
264 }
265
266 if (is_strongly_live(referent)) {
267 log_trace(gc,ref)("Reference strongly live: " PTR_FORMAT, p2i(reference));
268 return false;
269 }
270
271 if (is_softly_live(reference, type)) {
272 log_trace(gc,ref)("Reference softly live: " PTR_FORMAT, p2i(reference));
273 return false;
274 }
275
276 return true;
277 }
278
279 template <typename T>
280 bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
281 const oop referent = reference_referent<T>(reference);
282 if (referent == nullptr) {
283 // Reference has been cleared, by a call to Reference.enqueue()
284 // or Reference.clear() from the application, which means we
285 // should drop the reference.
286 return true;
287 }
288
289 // Check if the referent is still alive, in which case we should
290 // drop the reference.
291 if (type == REF_PHANTOM) {
292 return ShenandoahHeap::heap()->complete_marking_context()->is_marked(referent);
293 } else {
294 return ShenandoahHeap::heap()->complete_marking_context()->is_marked_strong(referent);
295 }
321 if (reference_discovered<T>(reference) != nullptr) {
322 // Already discovered. This can happen if the reference is marked finalizable first, and then strong,
323 // in which case it will be seen 2x by marking.
324 log_trace(gc,ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
325 return true;
326 }
327
328 if (type == REF_FINAL) {
329 ShenandoahMarkRefsSuperClosure* cl = _ref_proc_thread_locals[worker_id].mark_closure();
330 bool weak = cl->is_weak();
331 cl->set_weak(true);
332 if (UseCompressedOops) {
333 cl->do_oop(reinterpret_cast<narrowOop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
334 } else {
335 cl->do_oop(reinterpret_cast<oop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
336 }
337 cl->set_weak(weak);
338 }
339
340 // Add reference to discovered list
341 ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
342 oop discovered_head = refproc_data.discovered_list_head<T>();
343 if (discovered_head == nullptr) {
344 // Self-loop tail of list. We distinguish discovered from not-discovered references by looking at their
345 // discovered field: if it is null, then it is not-yet discovered, otherwise it is discovered
346 discovered_head = reference;
347 }
348 if (reference_cas_discovered<T>(reference, discovered_head)) {
349 refproc_data.set_discovered_list_head<T>(reference);
350 assert(refproc_data.discovered_list_head<T>() == reference, "reference must be new discovered head");
351 log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
352 _ref_proc_thread_locals[worker_id].inc_discovered(type);
353 }
354 return true;
355 }
356
357 bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
358 if (!RegisterReferences) {
359 // Reference processing disabled
360 return false;
361 }
362
363 log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
364 uint worker_id = WorkerThread::worker_id();
365 _ref_proc_thread_locals[worker_id].inc_encountered(type);
366
367 if (UseCompressedOops) {
368 return discover<narrowOop>(reference, type, worker_id);
369 } else {
370 return discover<oop>(reference, type, worker_id);
371 }
372 }
373
374 template <typename T>
375 oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
376 log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
377
378 #ifdef ASSERT
379 oop referent = reference_referent<T>(reference);
380 assert(referent == nullptr || ShenandoahHeap::heap()->marking_context()->is_marked(referent),
381 "only drop references with alive referents");
382 #endif
383
384 // Unlink and return next in list
385 oop next = reference_discovered<T>(reference);
386 reference_set_discovered<T>(reference, nullptr);
387 return next;
388 }
389
390 template <typename T>
391 T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint worker_id) {
392 log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
393
394 // Update statistics
395 _ref_proc_thread_locals[worker_id].inc_enqueued(type);
396
397 // Make reference inactive
398 make_inactive<T>(reference, type);
399
400 // Return next in list
401 return reference_discovered_addr<T>(reference);
402 }
403
404 template <typename T>
405 void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
406 log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head<T>()));
418 break;
419 }
420 log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
421 const ReferenceType type = reference_type(reference);
422
423 if (should_drop<T>(reference, type)) {
424 set_oop_field(p, drop<T>(reference, type));
425 } else {
426 p = keep<T>(reference, type, worker_id);
427 }
428
429 const oop discovered = lrb(reference_discovered<T>(reference));
430 if (reference == discovered) {
431 // Reset terminating self-loop to null
432 reference_set_discovered<T>(reference, oop(nullptr));
433 break;
434 }
435 }
436
437 // Prepend discovered references to internal pending list
438 if (!CompressedOops::is_null(*list)) {
439 oop head = lrb(CompressedOops::decode_not_null(*list));
440 shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
441 oop prev = Atomic::xchg(&_pending_list, head);
442 RawAccess<>::oop_store(p, prev);
443 if (prev == nullptr) {
444 // First to prepend to list, record tail
445 _pending_list_tail = reinterpret_cast<void*>(p);
446 }
447
448 // Clear discovered list
449 set_oop_field(list, oop(nullptr));
450 }
451 }
452
453 void ShenandoahReferenceProcessor::work() {
454 // Process discovered references
455 uint max_workers = ShenandoahHeap::heap()->max_workers();
456 uint worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
457 while (worker_id < max_workers) {
458 if (UseCompressedOops) {
459 process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
460 } else {
461 process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
462 }
// Drive a complete reference-processing pass: run the worker task over the
// per-worker discovered lists, update the SoftReference clock, collect and
// log statistics, and finally publish the pending list to java.lang.ref.
void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {

  // Reset the claim counter that work() uses to hand out per-worker lists
  Atomic::release_store_fence(&_iterate_discovered_list_id, 0U);

  // Process discovered lists
  ShenandoahReferenceProcessorTask task(phase, concurrent, this);
  workers->run_task(&task);

  // Update SoftReference clock
  soft_reference_update_clock();

  // Collect, log and trace statistics
  collect_statistics();

  enqueue_references(concurrent);
}
510
511 void ShenandoahReferenceProcessor::enqueue_references_locked() {
512 // Prepend internal pending list to external pending list
513 shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
514 if (UseCompressedOops) {
515 *reinterpret_cast<narrowOop*>(_pending_list_tail) = CompressedOops::encode(Universe::swap_reference_pending_list(_pending_list));
516 } else {
517 *reinterpret_cast<oop*>(_pending_list_tail) = Universe::swap_reference_pending_list(_pending_list);
518 }
519 }
520
521 void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
522 if (_pending_list == nullptr) {
523 // Nothing to enqueue
524 return;
525 }
526
527 if (!concurrent) {
528 // When called from mark-compact or degen-GC, the locking is done by the VMOperation,
529 enqueue_references_locked();
530 } else {
531 // Heap_lock protects external pending list
532 MonitorLocker ml(Heap_lock);
533
534 enqueue_references_locked();
535
536 // Notify ReferenceHandler thread
537 ml.notify_all();
538 }
539
540 // Reset internal pending list
541 _pending_list = nullptr;
542 _pending_list_tail = &_pending_list;
543 }
544
545 template<typename T>
546 void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
584 for (uint i = 0; i < max_workers; i++) {
585 for (size_t type = 0; type < reference_type_count; type++) {
586 encountered[type] += _ref_proc_thread_locals[i].encountered((ReferenceType)type);
587 discovered[type] += _ref_proc_thread_locals[i].discovered((ReferenceType)type);
588 enqueued[type] += _ref_proc_thread_locals[i].enqueued((ReferenceType)type);
589 }
590 }
591
592 _stats = ReferenceProcessorStats(discovered[REF_SOFT],
593 discovered[REF_WEAK],
594 discovered[REF_FINAL],
595 discovered[REF_PHANTOM]);
596
597 log_info(gc,ref)("Encountered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
598 encountered[REF_SOFT], encountered[REF_WEAK], encountered[REF_FINAL], encountered[REF_PHANTOM]);
599 log_info(gc,ref)("Discovered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
600 discovered[REF_SOFT], discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]);
601 log_info(gc,ref)("Enqueued references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
602 enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]);
603 }
604
|
1 /*
2 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "classfile/javaClasses.hpp"
29 #include "gc/shared/workerThread.hpp"
30 #include "gc/shenandoah/shenandoahGeneration.hpp"
31 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
32 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
33 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
34 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
35 #include "gc/shenandoah/shenandoahUtils.hpp"
36 #include "runtime/atomic.hpp"
37 #include "logging/log.hpp"
38
39 static ReferenceType reference_type(oop reference) {
40 return InstanceKlass::cast(reference->klass())->reference_type();
41 }
42
43 static const char* reference_type_name(ReferenceType type) {
44 switch (type) {
45 case REF_SOFT:
46 return "Soft";
47
48 case REF_WEAK:
49 return "Weak";
50
51 case REF_FINAL:
52 return "Final";
53
54 case REF_PHANTOM:
55 return "Phantom";
56
57 default:
58 ShouldNotReachHere();
59 return nullptr;
60 }
61 }
62
63 template <typename T>
64 static void card_mark_barrier(T* field, oop value) {
65 assert(ShenandoahCardBarrier, "Card-mark barrier should be on");
66 ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
67 assert(heap->is_in_or_null(value), "Should be in heap");
68 if (heap->is_in_old(field) && heap->is_in_young(value)) {
69 // For Shenandoah, each generation collects all the _referents_ that belong to the
70 // collected generation. We can end up with discovered lists that contain a mixture
71 // of old and young _references_. These references are linked together through the
72 // discovered field in java.lang.Reference. In some cases, creating or editing this
73 // list may result in the creation of _new_ old-to-young pointers which must dirty
74 // the corresponding card. Failing to do this may cause heap verification errors and
75 // lead to incorrect GC behavior.
76 heap->old_generation()->mark_card_as_dirty(field);
77 }
78 }
79
// Store an oop value into a (possibly compressed) reference field, applying
// the generational card-mark barrier when enabled so that any new
// old-to-young pointer dirties the corresponding remembered-set card.
template <typename T>
static void set_oop_field(T* field, oop value);

template <>
void set_oop_field<oop>(oop* field, oop value) {
  *field = value;
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

template <>
void set_oop_field<narrowOop>(narrowOop* field, oop value) {
  // Compressed-oop fields hold the encoded form of the value
  *field = CompressedOops::encode(value);
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}
98
99 static oop lrb(oop obj) {
100 if (obj != nullptr && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
101 return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
102 } else {
103 return obj;
104 }
105 }
106
107 template <typename T>
108 static volatile T* reference_referent_addr(oop reference) {
109 return (volatile T*)java_lang_ref_Reference::referent_addr_raw(reference);
110 }
111
112 template <typename T>
113 static oop reference_referent(oop reference) {
114 T heap_oop = Atomic::load(reference_referent_addr<T>(reference));
115 return CompressedOops::decode(heap_oop);
116 }
266 }
267
268 bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
269 if (type != REF_SOFT) {
270 // Not a SoftReference
271 return false;
272 }
273
274 // Ask SoftReference policy
275 const jlong clock = java_lang_ref_SoftReference::clock();
276 assert(clock != 0, "Clock not initialized");
277 assert(_soft_reference_policy != nullptr, "Policy not initialized");
278 return !_soft_reference_policy->should_clear_reference(reference, clock);
279 }
280
281 template <typename T>
282 bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
283 T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference);
284 T heap_oop = RawAccess<>::oop_load(referent_addr);
285 oop referent = CompressedOops::decode(heap_oop);
286 ShenandoahHeap* heap = ShenandoahHeap::heap();
287
288 if (is_inactive<T>(reference, referent, type)) {
289 log_trace(gc,ref)("Reference inactive: " PTR_FORMAT, p2i(reference));
290 return false;
291 }
292
293 if (is_strongly_live(referent)) {
294 log_trace(gc,ref)("Reference strongly live: " PTR_FORMAT, p2i(reference));
295 return false;
296 }
297
298 if (is_softly_live(reference, type)) {
299 log_trace(gc,ref)("Reference softly live: " PTR_FORMAT, p2i(reference));
300 return false;
301 }
302
303 if (!heap->is_in_active_generation(referent)) {
304 log_trace(gc,ref)("Referent outside of active generation: " PTR_FORMAT, p2i(referent));
305 return false;
306 }
307
308 return true;
309 }
310
311 template <typename T>
312 bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
313 const oop referent = reference_referent<T>(reference);
314 if (referent == nullptr) {
315 // Reference has been cleared, by a call to Reference.enqueue()
316 // or Reference.clear() from the application, which means we
317 // should drop the reference.
318 return true;
319 }
320
321 // Check if the referent is still alive, in which case we should
322 // drop the reference.
323 if (type == REF_PHANTOM) {
324 return ShenandoahHeap::heap()->complete_marking_context()->is_marked(referent);
325 } else {
326 return ShenandoahHeap::heap()->complete_marking_context()->is_marked_strong(referent);
327 }
353 if (reference_discovered<T>(reference) != nullptr) {
354 // Already discovered. This can happen if the reference is marked finalizable first, and then strong,
355 // in which case it will be seen 2x by marking.
356 log_trace(gc,ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
357 return true;
358 }
359
360 if (type == REF_FINAL) {
361 ShenandoahMarkRefsSuperClosure* cl = _ref_proc_thread_locals[worker_id].mark_closure();
362 bool weak = cl->is_weak();
363 cl->set_weak(true);
364 if (UseCompressedOops) {
365 cl->do_oop(reinterpret_cast<narrowOop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
366 } else {
367 cl->do_oop(reinterpret_cast<oop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
368 }
369 cl->set_weak(weak);
370 }
371
372 // Add reference to discovered list
373 // Each worker thread has a private copy of refproc_data, which includes a private discovered list. This means
374 // there's no risk that a different worker thread will try to manipulate my discovered list head while I'm making
375 // reference the head of my discovered list.
376 ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
377 oop discovered_head = refproc_data.discovered_list_head<T>();
378 if (discovered_head == nullptr) {
379 // Self-loop tail of list. We distinguish discovered from not-discovered references by looking at their
380 // discovered field: if it is null, then it is not-yet discovered, otherwise it is discovered
381 discovered_head = reference;
382 }
383 if (reference_cas_discovered<T>(reference, discovered_head)) {
384 // We successfully set this reference object's next pointer to discovered_head. This marks reference as discovered.
385 // If reference_cas_discovered fails, that means some other worker thread took credit for discovery of this reference,
386 // and that other thread will place reference on its discovered list, so I can ignore reference.
387
388 // In case we have created an interesting pointer, mark the remembered set card as dirty.
389 if (ShenandoahCardBarrier) {
390 T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
391 card_mark_barrier(addr, discovered_head);
392 }
393
394 // Make the discovered_list_head point to reference.
395 refproc_data.set_discovered_list_head<T>(reference);
396 assert(refproc_data.discovered_list_head<T>() == reference, "reference must be new discovered head");
397 log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
398 _ref_proc_thread_locals[worker_id].inc_discovered(type);
399 }
400 return true;
401 }
402
403 bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
404 if (!RegisterReferences) {
405 // Reference processing disabled
406 return false;
407 }
408
409 log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s, %s)",
410 p2i(reference), reference_type_name(type), ShenandoahHeap::heap()->heap_region_containing(reference)->affiliation_name());
411 uint worker_id = WorkerThread::worker_id();
412 _ref_proc_thread_locals[worker_id].inc_encountered(type);
413
414 if (UseCompressedOops) {
415 return discover<narrowOop>(reference, type, worker_id);
416 } else {
417 return discover<oop>(reference, type, worker_id);
418 }
419 }
420
// Drop a reference whose referent is still alive (or has been cleared):
// unlink it from the discovered list, dirty its card if it survived the
// cycle with a young referent, and return the next reference in the list.
template <typename T>
oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  oop referent = reference_referent<T>(reference);
  assert(referent == nullptr || heap->marking_context()->is_marked(referent), "only drop references with alive referents");

  // Unlink and return next in list
  oop next = reference_discovered<T>(reference);
  reference_set_discovered<T>(reference, nullptr);
  // When this reference was discovered, it would not have been marked. If it ends up surviving
  // the cycle, we need to dirty the card if the reference is old and the referent is young. Note
  // that if the reference is not dropped, then its pointer to the referent will be nulled before
  // evacuation begins so card does not need to be dirtied.
  if (ShenandoahCardBarrier) {
    card_mark_barrier(cast_from_oop<HeapWord*>(reference), referent);
  }
  return next;
}
441
442 template <typename T>
443 T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint worker_id) {
444 log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
445
446 // Update statistics
447 _ref_proc_thread_locals[worker_id].inc_enqueued(type);
448
449 // Make reference inactive
450 make_inactive<T>(reference, type);
451
452 // Return next in list
453 return reference_discovered_addr<T>(reference);
454 }
455
456 template <typename T>
457 void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
458 log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head<T>()));
470 break;
471 }
472 log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
473 const ReferenceType type = reference_type(reference);
474
475 if (should_drop<T>(reference, type)) {
476 set_oop_field(p, drop<T>(reference, type));
477 } else {
478 p = keep<T>(reference, type, worker_id);
479 }
480
481 const oop discovered = lrb(reference_discovered<T>(reference));
482 if (reference == discovered) {
483 // Reset terminating self-loop to null
484 reference_set_discovered<T>(reference, oop(nullptr));
485 break;
486 }
487 }
488
489 // Prepend discovered references to internal pending list
490 // set_oop_field maintains the card mark barrier as this list is constructed.
491 if (!CompressedOops::is_null(*list)) {
492 oop head = lrb(CompressedOops::decode_not_null(*list));
493 shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
494 oop prev = Atomic::xchg(&_pending_list, head);
495 set_oop_field(p, prev);
496 if (prev == nullptr) {
497 // First to prepend to list, record tail
498 _pending_list_tail = reinterpret_cast<void*>(p);
499 }
500
501 // Clear discovered list
502 set_oop_field(list, oop(nullptr));
503 }
504 }
505
506 void ShenandoahReferenceProcessor::work() {
507 // Process discovered references
508 uint max_workers = ShenandoahHeap::heap()->max_workers();
509 uint worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
510 while (worker_id < max_workers) {
511 if (UseCompressedOops) {
512 process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
513 } else {
514 process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
515 }
// Drive a complete reference-processing pass: run the worker task over the
// per-worker discovered lists, update the SoftReference clock, collect and
// log statistics, and finally publish the pending list to java.lang.ref.
void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {

  // Reset the claim counter that work() uses to hand out per-worker lists
  Atomic::release_store_fence(&_iterate_discovered_list_id, 0U);

  // Process discovered lists
  ShenandoahReferenceProcessorTask task(phase, concurrent, this);
  workers->run_task(&task);

  // Update SoftReference clock
  soft_reference_update_clock();

  // Collect, log and trace statistics
  collect_statistics();

  enqueue_references(concurrent);
}
563
564 void ShenandoahReferenceProcessor::enqueue_references_locked() {
565 // Prepend internal pending list to external pending list
566 shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
567
568 // During reference processing, we maintain a local list of references that are identified by
569 // _pending_list and _pending_list_tail. _pending_list_tail points to the next field of the last Reference object on
570 // the local list.
571 //
572 // There is also a global list of reference identified by Universe::_reference_pending_list
573
574 // The following code has the effect of:
575 // 1. Making the global Universe::_reference_pending_list point to my local list
576 // 2. Overwriting the next field of the last Reference on my local list to point at the previous head of the
577 // global Universe::_reference_pending_list
578
579 oop former_head_of_global_list = Universe::swap_reference_pending_list(_pending_list);
580 if (UseCompressedOops) {
581 set_oop_field<narrowOop>(reinterpret_cast<narrowOop*>(_pending_list_tail), former_head_of_global_list);
582 } else {
583 set_oop_field<oop>(reinterpret_cast<oop*>(_pending_list_tail), former_head_of_global_list);
584 }
585 }
586
587 void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
588 if (_pending_list == nullptr) {
589 // Nothing to enqueue
590 return;
591 }
592 if (!concurrent) {
593 // When called from mark-compact or degen-GC, the locking is done by the VMOperation,
594 enqueue_references_locked();
595 } else {
596 // Heap_lock protects external pending list
597 MonitorLocker ml(Heap_lock);
598
599 enqueue_references_locked();
600
601 // Notify ReferenceHandler thread
602 ml.notify_all();
603 }
604
605 // Reset internal pending list
606 _pending_list = nullptr;
607 _pending_list_tail = &_pending_list;
608 }
609
610 template<typename T>
611 void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
649 for (uint i = 0; i < max_workers; i++) {
650 for (size_t type = 0; type < reference_type_count; type++) {
651 encountered[type] += _ref_proc_thread_locals[i].encountered((ReferenceType)type);
652 discovered[type] += _ref_proc_thread_locals[i].discovered((ReferenceType)type);
653 enqueued[type] += _ref_proc_thread_locals[i].enqueued((ReferenceType)type);
654 }
655 }
656
657 _stats = ReferenceProcessorStats(discovered[REF_SOFT],
658 discovered[REF_WEAK],
659 discovered[REF_FINAL],
660 discovered[REF_PHANTOM]);
661
662 log_info(gc,ref)("Encountered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
663 encountered[REF_SOFT], encountered[REF_WEAK], encountered[REF_FINAL], encountered[REF_PHANTOM]);
664 log_info(gc,ref)("Discovered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
665 discovered[REF_SOFT], discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]);
666 log_info(gc,ref)("Enqueued references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
667 enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]);
668 }
|