/*
 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"

static ReferenceType reference_type(oop reference) {
  return InstanceKlass::cast(reference->klass())->reference_type();
}

static const char* reference_type_name(ReferenceType type) {
  switch (type) {
    case REF_SOFT:
      return "Soft";

    case REF_WEAK:
      return "Weak";

    case REF_FINAL:
      return "Final";

    case REF_PHANTOM:
      return "Phantom";

    default:
      ShouldNotReachHere();
      return nullptr;
  }
}
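
// Card-mark support for reference-processor updates. In the generational mode
// (guarded by ShenandoahCardBarrier), linking Reference objects into discovered
// or pending lists can create new old-to-young pointers through their discovered
// fields; card_mark_barrier() dirties the corresponding remembered-set card so
// remembered-set scanning does not miss them.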
template <typename T>
static void card_mark_barrier(T* field, oop value) {
  assert(ShenandoahCardBarrier, "Card-mark barrier should be on");
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  assert(heap->is_in_or_null(value), "Should be in heap");
  if (heap->is_in_old(field) && heap->is_in_young(value)) {
    // For Shenandoah, each generation collects all the _referents_ that belong to the
    // collected generation. We can end up with discovered lists that contain a mixture
    // of old and young _references_. These references are linked together through the
    // discovered field in java.lang.Reference. In some cases, creating or editing this
    // list may result in the creation of _new_ old-to-young pointers which must dirty
    // the corresponding card. Failing to do this may cause heap verification errors and
    // lead to incorrect GC behavior.
    heap->old_generation()->mark_card_as_dirty(field);
  }
}

template <typename T>
static void set_oop_field(T* field, oop value);

template <>
void set_oop_field<oop>(oop* field, oop value) {
  *field = value;
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

template <>
void set_oop_field<narrowOop>(narrowOop* field, oop value) {
  *field = CompressedOops::encode(value);
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

static oop lrb(oop obj) {
  if (obj != nullptr && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
    return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
  } else {
    return obj;
  }
}

template <typename T>
static volatile T* reference_referent_addr(oop reference) {
  return (volatile T*)java_lang_ref_Reference::referent_addr_raw(reference);
}

template <typename T>
static oop reference_referent(oop reference) {
  T heap_oop = Atomic::load(reference_referent_addr<T>(reference));
  return CompressedOops::decode(heap_oop);
}

static void reference_clear_referent(oop reference) {
  java_lang_ref_Reference::clear_referent_raw(reference);
}

template <typename T>
static T* reference_discovered_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
}

template <typename T>
static oop reference_discovered(oop reference) {
  T heap_oop = *reference_discovered_addr<T>(reference);
  return lrb(CompressedOops::decode(heap_oop));
}

template <typename T>
static void reference_set_discovered(oop reference, oop discovered);

template <>
void reference_set_discovered<oop>(oop reference, oop discovered) {
  *reference_discovered_addr<oop>(reference) = discovered;
}

template <>
void reference_set_discovered<narrowOop>(oop reference, oop discovered) {
  *reference_discovered_addr<narrowOop>(reference) = CompressedOops::encode(discovered);
}

template <typename T>
static bool reference_cas_discovered(oop reference, oop discovered) {
  T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
  return ShenandoahHeap::atomic_update_oop_check(discovered, addr, nullptr);
}

template <typename T>
static T* reference_next_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::next_addr_raw(reference));
}

template <typename T>
static oop reference_next(oop reference) {
  T heap_oop = RawAccess<>::oop_load(reference_next_addr<T>(reference));
  return lrb(CompressedOops::decode(heap_oop));
}

static void reference_set_next(oop reference, oop next) {
  java_lang_ref_Reference::set_next_raw(reference, next);
}

static void soft_reference_update_clock() {
  const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  java_lang_ref_SoftReference::set_clock(now);
}

ShenandoahRefProcThreadLocal::ShenandoahRefProcThreadLocal() :
  _discovered_list(nullptr),
  _encountered_count(),
  _discovered_count(),
  _enqueued_count() {
}

void ShenandoahRefProcThreadLocal::reset() {
  _discovered_list = nullptr;
  _mark_closure = nullptr;
  for (uint i = 0; i < reference_type_count; i++) {
    _encountered_count[i] = 0;
    _discovered_count[i] = 0;
    _enqueued_count[i] = 0;
  }
}
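
// The discovered list head lives in the thread-local as a raw word and is viewed
// as either an oop or a narrowOop, matching UseCompressedOops, so that it uses the
// same encoding as the discovered fields of the Reference objects linked behind it.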
template <typename T>
T* ShenandoahRefProcThreadLocal::discovered_list_addr() {
  return reinterpret_cast<T*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<oop>() const {
  return *reinterpret_cast<const oop*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<narrowOop>() const {
  return CompressedOops::decode(*reinterpret_cast<const narrowOop*>(&_discovered_list));
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<narrowOop>(oop head) {
  *discovered_list_addr<narrowOop>() = CompressedOops::encode(head);
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<oop>(oop head) {
  *discovered_list_addr<oop>() = head;
}

ShenandoahReferenceProcessor::ShenandoahReferenceProcessor(uint max_workers) :
  _soft_reference_policy(nullptr),
  _ref_proc_thread_locals(NEW_C_HEAP_ARRAY(ShenandoahRefProcThreadLocal, max_workers, mtGC)),
  _pending_list(nullptr),
  _pending_list_tail(&_pending_list),
  _iterate_discovered_list_id(0U),
  _stats() {
  for (size_t i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

void ShenandoahReferenceProcessor::reset_thread_locals() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

void ShenandoahReferenceProcessor::set_mark_closure(uint worker_id, ShenandoahMarkRefsSuperClosure* mark_closure) {
  _ref_proc_thread_locals[worker_id].set_mark_closure(mark_closure);
}

void ShenandoahReferenceProcessor::set_soft_reference_policy(bool clear) {
  static AlwaysClearPolicy always_clear_policy;
  static LRUMaxHeapPolicy lru_max_heap_policy;

  if (clear) {
    log_info(gc, ref)("Clearing All SoftReferences");
    _soft_reference_policy = &always_clear_policy;
  } else {
    _soft_reference_policy = &lru_max_heap_policy;
  }

  _soft_reference_policy->setup();
}
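
// Discovery predicates: a reference is a candidate for discovery only if it is
// still active, its referent is neither strongly live nor (for SoftReference)
// softly live, and the referent belongs to the generation being collected.
// should_discover() below combines these checks.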
template <typename T>
bool ShenandoahReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
  if (type == REF_FINAL) {
    // A FinalReference is inactive if its next field is non-null. An application can't
    // call enqueue() or clear() on a FinalReference.
    return reference_next<T>(reference) != nullptr;
  } else {
    // A non-FinalReference is inactive if the referent is null. The referent can only
    // be null if the application called Reference.enqueue() or Reference.clear().
    return referent == nullptr;
  }
}

bool ShenandoahReferenceProcessor::is_strongly_live(oop referent) const {
  return ShenandoahHeap::heap()->marking_context()->is_marked_strong(referent);
}

bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
  if (type != REF_SOFT) {
    // Not a SoftReference
    return false;
  }

  // Ask SoftReference policy
  const jlong clock = java_lang_ref_SoftReference::clock();
  assert(clock != 0, "Clock not initialized");
  assert(_soft_reference_policy != nullptr, "Policy not initialized");
  return !_soft_reference_policy->should_clear_reference(reference, clock);
}

template <typename T>
bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
  T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference);
  T heap_oop = RawAccess<>::oop_load(referent_addr);
  oop referent = CompressedOops::decode(heap_oop);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (is_inactive<T>(reference, referent, type)) {
    log_trace(gc, ref)("Reference inactive: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (is_strongly_live(referent)) {
    log_trace(gc, ref)("Reference strongly live: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (is_softly_live(reference, type)) {
    log_trace(gc, ref)("Reference softly live: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (!heap->is_in_active_generation(referent)) {
    log_trace(gc, ref)("Referent outside of active generation: " PTR_FORMAT, p2i(referent));
    return false;
  }

  return true;
}

template <typename T>
bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
  const oop referent = reference_referent<T>(reference);
  if (referent == nullptr) {
    // The reference has been cleared by a call to Reference.enqueue() or
    // Reference.clear() from the application, which means we should drop it.
    return true;
  }

  // Check if the referent is still alive, in which case we should
  // drop the reference.
  if (type == REF_PHANTOM) {
    return ShenandoahHeap::heap()->complete_marking_context()->is_marked(referent);
  } else {
    return ShenandoahHeap::heap()->complete_marking_context()->is_marked_strong(referent);
  }
}
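
// Makes a reference inactive, mirroring is_inactive() above: a FinalReference is
// deactivated by self-looping its next field (the referent is still needed for
// finalization), every other type by clearing its referent.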
template <typename T>
void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
  if (type == REF_FINAL) {
    // Don't clear referent. It is needed by the Finalizer thread to make the call
    // to finalize(). A FinalReference is instead made inactive by self-looping the
    // next field. An application can't call FinalReference.enqueue(), so there is
    // no race to worry about when setting the next field.
    assert(reference_next<T>(reference) == nullptr, "Already inactive");
    assert(ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent<T>(reference)),
           "only make inactive final refs with alive referents");
    reference_set_next(reference, reference);
  } else {
    // Clear referent
    reference_clear_referent(reference);
  }
}
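
// Discovery: called during marking, via discover_reference(), for every Reference
// encountered. Each worker prepends discovered references to a private list linked
// through the discovered fields and terminated by a self-loop, e.g.:
//
//   _discovered_list -> R2.discovered -> R1.discovered -> R1 (self-loop tail)
//
// The CAS on the discovered field doubles as the claim that keeps a reference from
// being discovered by more than one worker.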
template <typename T>
bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, uint worker_id) {
  if (!should_discover<T>(reference, type)) {
    // Not discovered
    return false;
  }

  if (reference_discovered<T>(reference) != nullptr) {
    // Already discovered. This can happen if the reference is marked finalizable first, and then strong,
    // in which case it will be seen twice by marking.
    log_trace(gc, ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
    return true;
  }

  if (type == REF_FINAL) {
    ShenandoahMarkRefsSuperClosure* cl = _ref_proc_thread_locals[worker_id].mark_closure();
    bool weak = cl->is_weak();
    cl->set_weak(true);
    if (UseCompressedOops) {
      cl->do_oop(reinterpret_cast<narrowOop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    } else {
      cl->do_oop(reinterpret_cast<oop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    }
    cl->set_weak(weak);
  }

  // Add reference to discovered list.
  // Each worker thread has a private copy of refproc_data, which includes a private discovered list. This means
  // there's no risk that a different worker thread will try to manipulate my discovered list head while I'm making
  // reference the head of my discovered list.
  ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
  oop discovered_head = refproc_data.discovered_list_head<T>();
  if (discovered_head == nullptr) {
    // Self-loop the tail of the list. We distinguish discovered from not-discovered references by looking at their
    // discovered field: if it is null, then it is not yet discovered; otherwise it is discovered.
    discovered_head = reference;
  }
  if (reference_cas_discovered<T>(reference, discovered_head)) {
    // We successfully set this reference object's discovered field to discovered_head. This marks reference as
    // discovered. If reference_cas_discovered fails, some other worker thread took credit for discovery of this
    // reference, and that other thread will place reference on its discovered list, so I can ignore reference.

    // In case we have created an interesting pointer, mark the remembered set card as dirty.
    if (ShenandoahCardBarrier) {
      T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
      card_mark_barrier(addr, discovered_head);
    }

    // Make the discovered_list_head point to reference.
    refproc_data.set_discovered_list_head<T>(reference);
    assert(refproc_data.discovered_list_head<T>() == reference, "reference must be new discovered head");
    log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
    _ref_proc_thread_locals[worker_id].inc_discovered(type);
  }
  return true;
}

bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
  if (!RegisterReferences) {
    // Reference processing disabled
    return false;
  }

  log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s, %s)",
                     p2i(reference), reference_type_name(type),
                     ShenandoahHeap::heap()->heap_region_containing(reference)->affiliation_name());
  uint worker_id = WorkerThread::worker_id();
  _ref_proc_thread_locals[worker_id].inc_encountered(type);

  if (UseCompressedOops) {
    return discover<narrowOop>(reference, type, worker_id);
  } else {
    return discover<oop>(reference, type, worker_id);
  }
}

template <typename T>
oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  oop referent = reference_referent<T>(reference);
  assert(referent == nullptr || heap->marking_context()->is_marked(referent), "only drop references with alive referents");

  // Unlink and return next in list
  oop next = reference_discovered<T>(reference);
  reference_set_discovered<T>(reference, nullptr);
  // When this reference was discovered, it would not have been marked. If it ends up surviving
  // the cycle, we need to dirty the card if the reference is old and the referent is young. Note
  // that if the reference is not dropped, then its pointer to the referent will be nulled before
  // evacuation begins, so the card does not need to be dirtied.
  if (ShenandoahCardBarrier) {
    card_mark_barrier(cast_from_oop<HeapWord*>(reference), referent);
  }
  return next;
}

template <typename T>
T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint worker_id) {
  log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _ref_proc_thread_locals[worker_id].inc_enqueued(type);

  // Make reference inactive
  make_inactive<T>(reference, type);

  // Return the address of the discovered field, i.e. the next link in the list
  return reference_discovered_addr<T>(reference);
}
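
// Processing: after marking completes, each discovered list is walked. References
// whose referents turned out to be alive are dropped from the list; the rest are
// made inactive and kept, and each surviving list is prepended onto the shared
// _pending_list with a single atomic exchange.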
template <typename T>
void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
  log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head<T>()));
  T* list = refproc_data.discovered_list_addr<T>();
  // The list head is basically a GC root; we need to resolve and update it,
  // otherwise we will later swap a from-space ref into Universe::pending_list().
  if (!CompressedOops::is_null(*list)) {
    oop first_resolved = lrb(CompressedOops::decode_not_null(*list));
    set_oop_field(list, first_resolved);
  }
  T* p = list;
  while (true) {
    const oop reference = lrb(CompressedOops::decode(*p));
    if (reference == nullptr) {
      break;
    }
    log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
    const ReferenceType type = reference_type(reference);

    if (should_drop<T>(reference, type)) {
      set_oop_field(p, drop<T>(reference, type));
    } else {
      p = keep<T>(reference, type, worker_id);
    }

    const oop discovered = lrb(reference_discovered<T>(reference));
    if (reference == discovered) {
      // Reset terminating self-loop to null
      reference_set_discovered<T>(reference, oop(nullptr));
      break;
    }
  }

  // Prepend discovered references to internal pending list.
  // set_oop_field maintains the card mark barrier as this list is constructed.
  if (!CompressedOops::is_null(*list)) {
    oop head = lrb(CompressedOops::decode_not_null(*list));
    shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
    oop prev = Atomic::xchg(&_pending_list, head);
    set_oop_field(p, prev);
    if (prev == nullptr) {
      // First to prepend to list, record tail
      _pending_list_tail = reinterpret_cast<void*>(p);
    }

    // Clear discovered list
    set_oop_field(list, oop(nullptr));
  }
}

void ShenandoahReferenceProcessor::work() {
  // Process discovered references
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  uint worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  while (worker_id < max_workers) {
    if (UseCompressedOops) {
      process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
    } else {
      process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
    }
    worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  }
}
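
// Task wrapper that runs work() on each worker, under either a concurrent or a
// parallel (safepoint) worker session. Workers claim whole per-worker discovered
// lists by atomically bumping _iterate_discovered_list_id in work() above, so each
// list is processed exactly once.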
class ShenandoahReferenceProcessorTask : public WorkerTask {
private:
  bool const                          _concurrent;
  ShenandoahPhaseTimings::Phase const _phase;
  ShenandoahReferenceProcessor* const _reference_processor;

public:
  ShenandoahReferenceProcessorTask(ShenandoahPhaseTimings::Phase phase, bool concurrent, ShenandoahReferenceProcessor* reference_processor) :
    WorkerTask("ShenandoahReferenceProcessorTask"),
    _concurrent(concurrent),
    _phase(phase),
    _reference_processor(reference_processor) {
  }

  virtual void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    }
  }
};

void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {
  Atomic::release_store_fence(&_iterate_discovered_list_id, 0U);

  // Process discovered lists
  ShenandoahReferenceProcessorTask task(phase, concurrent, this);
  workers->run_task(&task);

  // Update SoftReference clock
  soft_reference_update_clock();

  // Collect, log and trace statistics
  collect_statistics();

  enqueue_references(concurrent);
}

void ShenandoahReferenceProcessor::enqueue_references_locked() {
  // Prepend internal pending list to external pending list
  shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);

  // During reference processing, we maintain a local list of references that are identified by
  // _pending_list and _pending_list_tail. _pending_list_tail points to the discovered field of
  // the last Reference object on the local list.
  //
  // There is also a global list of references, identified by Universe::_reference_pending_list.
  //
  // The following code has the effect of:
  // 1. Making the global Universe::_reference_pending_list point to my local list
  // 2. Overwriting the discovered field of the last Reference on my local list to point at the
  //    previous head of the global Universe::_reference_pending_list

  oop former_head_of_global_list = Universe::swap_reference_pending_list(_pending_list);
  if (UseCompressedOops) {
    set_oop_field<narrowOop>(reinterpret_cast<narrowOop*>(_pending_list_tail), former_head_of_global_list);
  } else {
    set_oop_field<oop>(reinterpret_cast<oop*>(_pending_list_tail), former_head_of_global_list);
  }
}

void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
  if (_pending_list == nullptr) {
    // Nothing to enqueue
    return;
  }
  if (!concurrent) {
    // When called from mark-compact or degen-GC, the locking is done by the VMOperation.
    enqueue_references_locked();
  } else {
    // Heap_lock protects external pending list
    MonitorLocker ml(Heap_lock);

    enqueue_references_locked();

    // Notify ReferenceHandler thread
    ml.notify_all();
  }

  // Reset internal pending list
  _pending_list = nullptr;
  _pending_list_tail = &_pending_list;
}

template<typename T>
void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
  T discovered = *list;
  while (!CompressedOops::is_null(discovered)) {
    oop discovered_ref = CompressedOops::decode_not_null(discovered);
    set_oop_field<T>(list, oop(nullptr));
    list = reference_discovered_addr<T>(discovered_ref);
    discovered = *list;
  }
}

void ShenandoahReferenceProcessor::abandon_partial_discovery() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint index = 0; index < max_workers; index++) {
    if (UseCompressedOops) {
      clean_discovered_list<narrowOop>(_ref_proc_thread_locals[index].discovered_list_addr<narrowOop>());
    } else {
      clean_discovered_list<oop>(_ref_proc_thread_locals[index].discovered_list_addr<oop>());
    }
  }
  if (_pending_list != nullptr) {
    oop pending = _pending_list;
    _pending_list = nullptr;
    if (UseCompressedOops) {
      narrowOop* list = reference_discovered_addr<narrowOop>(pending);
      clean_discovered_list<narrowOop>(list);
    } else {
      oop* list = reference_discovered_addr<oop>(pending);
      clean_discovered_list<oop>(list);
    }
  }
  _pending_list_tail = &_pending_list;
}
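
// Per-worker counters accumulated during discovery and processing are folded into
// processor-wide totals and logged here at the end of each cycle.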
void ShenandoahReferenceProcessor::collect_statistics() {
  Counters encountered = {};
  Counters discovered = {};
  Counters enqueued = {};
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    for (size_t type = 0; type < reference_type_count; type++) {
      encountered[type] += _ref_proc_thread_locals[i].encountered((ReferenceType)type);
      discovered[type]  += _ref_proc_thread_locals[i].discovered((ReferenceType)type);
      enqueued[type]    += _ref_proc_thread_locals[i].enqueued((ReferenceType)type);
    }
  }

  _stats = ReferenceProcessorStats(discovered[REF_SOFT],
                                   discovered[REF_WEAK],
                                   discovered[REF_FINAL],
                                   discovered[REF_PHANTOM]);

  log_info(gc, ref)("Encountered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    encountered[REF_SOFT], encountered[REF_WEAK], encountered[REF_FINAL], encountered[REF_PHANTOM]);
  log_info(gc, ref)("Discovered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    discovered[REF_SOFT], discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]);
  log_info(gc, ref)("Enqueued references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]);
}