/*
 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"

static ReferenceType reference_type(oop reference) {
  return InstanceKlass::cast(reference->klass())->reference_type();
}

static const char* reference_type_name(ReferenceType type) {
  switch (type) {
    case REF_SOFT:
      return "Soft";

    case REF_WEAK:
      return "Weak";

    case REF_FINAL:
      return "Final";

    case REF_PHANTOM:
      return "Phantom";

    default:
      ShouldNotReachHere();
      return nullptr;
  }
}

template <typename T>
static void card_mark_barrier(T* field, oop value) {
  assert(ShenandoahCardBarrier, "Card-mark barrier should be on");
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  assert(heap->is_in_or_null(value), "Should be in heap");
  if (heap->is_in_old(field) && heap->is_in_young(value)) {
    // For Shenandoah, each generation collects all the _referents_ that belong to the
    // collected generation. We can end up with discovered lists that contain a mixture
    // of old and young _references_. These references are linked together through the
    // discovered field in java.lang.Reference. In some cases, creating or editing this
    // list may result in the creation of _new_ old-to-young pointers which must dirty
    // the corresponding card. Failing to do this may cause heap verification errors and
    // lead to incorrect GC behavior.
    heap->old_generation()->mark_card_as_dirty(field);
  }
}
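
// Concrete example of the hazard described above: if a Reference object R lives in
// an old region and we store a young object Y into R's discovered field, the card
// covering that field must be dirtied; otherwise the next remembered-set scan would
// miss the old-to-young edge and Y could be missed or left stale.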

template <typename T>
static void set_oop_field(T* field, oop value);

template <>
void set_oop_field<oop>(oop* field, oop value) {
  *field = value;
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

template <>
void set_oop_field<narrowOop>(narrowOop* field, oop value) {
  *field = CompressedOops::encode(value);
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

static oop lrb(oop obj) {
  if (obj != nullptr && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
    return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
  } else {
    return obj;
  }
}

template <typename T>
static volatile T* reference_referent_addr(oop reference) {
  return (volatile T*)java_lang_ref_Reference::referent_addr_raw(reference);
}

template <typename T>
static oop reference_referent(oop reference) {
  T heap_oop = Atomic::load(reference_referent_addr<T>(reference));
  return CompressedOops::decode(heap_oop);
}

static void reference_clear_referent(oop reference) {
  java_lang_ref_Reference::clear_referent_raw(reference);
}

template <typename T>
static T* reference_discovered_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
}

template <typename T>
static oop reference_discovered(oop reference) {
  T heap_oop = *reference_discovered_addr<T>(reference);
  return lrb(CompressedOops::decode(heap_oop));
}

template <typename T>
static void reference_set_discovered(oop reference, oop discovered);

template <>
void reference_set_discovered<oop>(oop reference, oop discovered) {
  *reference_discovered_addr<oop>(reference) = discovered;
}

template <>
void reference_set_discovered<narrowOop>(oop reference, oop discovered) {
  *reference_discovered_addr<narrowOop>(reference) = CompressedOops::encode(discovered);
}

template <typename T>
static bool reference_cas_discovered(oop reference, oop discovered) {
  T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
  return ShenandoahHeap::atomic_update_oop_check(discovered, addr, nullptr);
}

template <typename T>
static T* reference_next_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::next_addr_raw(reference));
}

template <typename T>
static oop reference_next(oop reference) {
  T heap_oop = RawAccess<>::oop_load(reference_next_addr<T>(reference));
  return lrb(CompressedOops::decode(heap_oop));
}

static void reference_set_next(oop reference, oop next) {
  java_lang_ref_Reference::set_next_raw(reference, next);
}

static void soft_reference_update_clock() {
  const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  java_lang_ref_SoftReference::set_clock(now);
}

ShenandoahRefProcThreadLocal::ShenandoahRefProcThreadLocal() :
  _discovered_list(nullptr),
  _encountered_count(),
  _discovered_count(),
  _enqueued_count() {
}

void ShenandoahRefProcThreadLocal::reset() {
  _discovered_list = nullptr;
  _mark_closure = nullptr;
  for (uint i = 0; i < reference_type_count; i++) {
    _encountered_count[i] = 0;
    _discovered_count[i] = 0;
    _enqueued_count[i] = 0;
  }
}
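
// The per-worker _discovered_list slot is a single heap word that is viewed either
// as an uncompressed oop or as a narrowOop, depending on UseCompressedOops. The
// accessors below simply reinterpret the slot at the requested width.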
template <typename T>
T* ShenandoahRefProcThreadLocal::discovered_list_addr() {
  return reinterpret_cast<T*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<oop>() const {
  return *reinterpret_cast<const oop*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<narrowOop>() const {
  return CompressedOops::decode(*reinterpret_cast<const narrowOop*>(&_discovered_list));
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<narrowOop>(oop head) {
  *discovered_list_addr<narrowOop>() = CompressedOops::encode(head);
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<oop>(oop head) {
  *discovered_list_addr<oop>() = head;
}

ShenandoahReferenceProcessor::ShenandoahReferenceProcessor(uint max_workers) :
  _soft_reference_policy(nullptr),
  _ref_proc_thread_locals(NEW_C_HEAP_ARRAY(ShenandoahRefProcThreadLocal, max_workers, mtGC)),
  _pending_list(nullptr),
  _pending_list_tail(&_pending_list),
  _iterate_discovered_list_id(0U),
  _stats() {
  for (size_t i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

void ShenandoahReferenceProcessor::reset_thread_locals() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

void ShenandoahReferenceProcessor::set_mark_closure(uint worker_id, ShenandoahMarkRefsSuperClosure* mark_closure) {
  _ref_proc_thread_locals[worker_id].set_mark_closure(mark_closure);
}

void ShenandoahReferenceProcessor::set_soft_reference_policy(bool clear) {
  static AlwaysClearPolicy always_clear_policy;
  static LRUMaxHeapPolicy lru_max_heap_policy;

  if (clear) {
    log_info(gc, ref)("Clearing All SoftReferences");
    _soft_reference_policy = &always_clear_policy;
  } else {
    _soft_reference_policy = &lru_max_heap_policy;
  }

  _soft_reference_policy->setup();
}

template <typename T>
bool ShenandoahReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
  if (type == REF_FINAL) {
    // A FinalReference is inactive if its next field is non-null. An application can't
    // call enqueue() or clear() on a FinalReference.
    return reference_next<T>(reference) != nullptr;
  } else {
    // A non-FinalReference is inactive if the referent is null. The referent can only
    // be null if the application called Reference.enqueue() or Reference.clear().
    return referent == nullptr;
  }
}
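
// Inactivity is thus encoded in two different fields: FinalReferences are
// deactivated by self-looping their next field (see make_inactive() below),
// while all other reference types are deactivated by clearing the referent.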

bool ShenandoahReferenceProcessor::is_strongly_live(oop referent) const {
  return ShenandoahHeap::heap()->marking_context()->is_marked_strong(referent);
}

bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
  if (type != REF_SOFT) {
    // Not a SoftReference
    return false;
  }

  // Ask SoftReference policy
  const jlong clock = java_lang_ref_SoftReference::clock();
  assert(clock != 0, "Clock not initialized");
  assert(_soft_reference_policy != nullptr, "Policy not initialized");
  return !_soft_reference_policy->should_clear_reference(reference, clock);
}

template <typename T>
bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
  T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference);
  T heap_oop = RawAccess<>::oop_load(referent_addr);
  oop referent = CompressedOops::decode(heap_oop);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (is_inactive<T>(reference, referent, type)) {
    log_trace(gc, ref)("Reference inactive: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (is_strongly_live(referent)) {
    log_trace(gc, ref)("Reference strongly live: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (is_softly_live(reference, type)) {
    log_trace(gc, ref)("Reference softly live: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (!heap->is_in_active_generation(referent)) {
    log_trace(gc, ref)("Referent outside of active generation: " PTR_FORMAT, p2i(referent));
    return false;
  }

  return true;
}

template <typename T>
bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
  const oop referent = reference_referent<T>(reference);
  if (referent == nullptr) {
    // Reference has been cleared by a call to Reference.enqueue()
    // or Reference.clear() from the application, which means we
    // should drop the reference.
    return true;
  }

  // Check if the referent is still alive, in which case we should
  // drop the reference.
  if (type == REF_PHANTOM) {
    return ShenandoahHeap::heap()->complete_marking_context()->is_marked(referent);
  } else {
    return ShenandoahHeap::heap()->complete_marking_context()->is_marked_strong(referent);
  }
}
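
// Phantom references use is_marked() rather than is_marked_strong() because a
// phantom referent only becomes eligible for processing once it is not even
// finalizably reachable, whereas soft/weak/final referents are kept alive by
// any strong mark.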

template <typename T>
void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
  if (type == REF_FINAL) {
    // Don't clear referent. It is needed by the Finalizer thread to make the call
    // to finalize(). A FinalReference is instead made inactive by self-looping the
    // next field. An application can't call FinalReference.enqueue(), so there is
    // no race to worry about when setting the next field.
    assert(reference_next<T>(reference) == nullptr, "Already inactive");
    assert(ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent<T>(reference)),
           "only make inactive final refs with alive referents");
    reference_set_next(reference, reference);
  } else {
    // Clear referent
    reference_clear_referent(reference);
  }
}

template <typename T>
bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, uint worker_id) {
  if (!should_discover<T>(reference, type)) {
    // Not discovered
    return false;
  }

  if (reference_discovered<T>(reference) != nullptr) {
    // Already discovered. This can happen if the reference is marked finalizable first, and then strong,
    // in which case it will be seen 2x by marking.
    log_trace(gc, ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
    return true;
  }

  if (type == REF_FINAL) {
    ShenandoahMarkRefsSuperClosure* cl = _ref_proc_thread_locals[worker_id].mark_closure();
    bool weak = cl->is_weak();
    cl->set_weak(true);
    if (UseCompressedOops) {
      cl->do_oop(reinterpret_cast<narrowOop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    } else {
      cl->do_oop(reinterpret_cast<oop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    }
    cl->set_weak(weak);
  }

  // Add reference to discovered list
  // Each worker thread has a private copy of refproc_data, which includes a private discovered list. This means
  // there's no risk that a different worker thread will try to manipulate my discovered list head while I'm making
  // reference the head of my discovered list.
  ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
  oop discovered_head = refproc_data.discovered_list_head<T>();
  if (discovered_head == nullptr) {
    // Self-loop tail of list. We distinguish discovered from not-discovered references by looking at their
    // discovered field: if it is null, then it is not-yet discovered, otherwise it is discovered.
    discovered_head = reference;
  }
  if (reference_cas_discovered<T>(reference, discovered_head)) {
    // We successfully set this reference object's discovered field to discovered_head. This marks reference as
    // discovered. If reference_cas_discovered fails, that means some other worker thread took credit for discovery
    // of this reference, and that other thread will place reference on its discovered list, so I can ignore reference.

    // In case we have created an interesting pointer, mark the remembered set card as dirty.
    if (ShenandoahCardBarrier) {
      T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
      card_mark_barrier(addr, discovered_head);
    }

    // Make the discovered_list_head point to reference.
    refproc_data.set_discovered_list_head<T>(reference);
    assert(refproc_data.discovered_list_head<T>() == reference, "reference must be new discovered head");
    log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
    _ref_proc_thread_locals[worker_id].inc_discovered(type);
  }
  return true;
}
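
// Shape of a non-empty per-worker discovered list, newest reference first:
//
//   _discovered_list -> R3 -> R2 -> R1 -> R1 (self-loop marks the tail)
//
// The self-loop lets us distinguish "discovered, last in list" (discovered field
// points to self) from "not discovered" (discovered field is null).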

bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
  if (!RegisterReferences) {
    // Reference processing disabled
    return false;
  }

  log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s, %s)",
                     p2i(reference), reference_type_name(type),
                     ShenandoahHeap::heap()->heap_region_containing(reference)->affiliation_name());
  uint worker_id = WorkerThread::worker_id();
  _ref_proc_thread_locals[worker_id].inc_encountered(type);

  if (UseCompressedOops) {
    return discover<narrowOop>(reference, type, worker_id);
  } else {
    return discover<oop>(reference, type, worker_id);
  }
}

template <typename T>
oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  oop referent = reference_referent<T>(reference);
  assert(referent == nullptr || heap->marking_context()->is_marked(referent), "only drop references with alive referents");

  // Unlink and return next in list
  oop next = reference_discovered<T>(reference);
  reference_set_discovered<T>(reference, nullptr);
  // When this reference was discovered, it would not have been marked. If it ends up surviving
  // the cycle, we need to dirty the card if the reference is old and the referent is young. Note
  // that if the reference is not dropped, then its pointer to the referent will be nulled before
  // evacuation begins, so the card does not need to be dirtied.
  if (heap->mode()->is_generational() && heap->is_in_old(reference) && heap->is_in_young(referent)) {
    // Note: it would be sufficient to mark only the card that holds the start of this Reference object.
    heap->old_generation()->card_scan()->mark_range_as_dirty(cast_from_oop<HeapWord*>(reference), reference->size());
  }
  return next;
}

template <typename T>
T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint worker_id) {
  log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _ref_proc_thread_locals[worker_id].inc_enqueued(type);

  // Make reference inactive
  make_inactive<T>(reference, type);

  // Return next in list
  return reference_discovered_addr<T>(reference);
}
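
// process_references() below walks a discovered list with a pointer to the previous
// link (p): keep() returns the address of the kept reference's discovered field, so
// the walk advances past it, while drop() returns the next reference, so
// set_oop_field(p, ...) splices the dropped reference out of the list.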
template <typename T>
void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
  log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head<T>()));
  T* list = refproc_data.discovered_list_addr<T>();
  // The list head is basically a GC root; we need to resolve and update it,
  // otherwise we will later swap a from-space ref into Universe::pending_list().
  if (!CompressedOops::is_null(*list)) {
    oop first_resolved = lrb(CompressedOops::decode_not_null(*list));
    set_oop_field(list, first_resolved);
  }
  T* p = list;
  while (true) {
    const oop reference = lrb(CompressedOops::decode(*p));
    if (reference == nullptr) {
      break;
    }
    log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
    const ReferenceType type = reference_type(reference);

    if (should_drop<T>(reference, type)) {
      set_oop_field(p, drop<T>(reference, type));
    } else {
      p = keep<T>(reference, type, worker_id);
    }

    const oop discovered = lrb(reference_discovered<T>(reference));
    if (reference == discovered) {
      // Reset terminating self-loop to null
      reference_set_discovered<T>(reference, oop(nullptr));
      break;
    }
  }

  // Prepend discovered references to internal pending list
  // set_oop_field maintains the card mark barrier as this list is constructed.
  if (!CompressedOops::is_null(*list)) {
    oop head = lrb(CompressedOops::decode_not_null(*list));
    shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
    oop prev = Atomic::xchg(&_pending_list, head);
    set_oop_field(p, prev);
    if (prev == nullptr) {
      // First to prepend to list, record tail
      _pending_list_tail = reinterpret_cast<void*>(p);
    }

    // Clear discovered list
    set_oop_field(list, oop(nullptr));
  }
}

void ShenandoahReferenceProcessor::work() {
  // Process discovered references
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  uint worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  while (worker_id < max_workers) {
    if (UseCompressedOops) {
      process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
    } else {
      process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
    }
    worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  }
}
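
// Each call to work() claims whole discovered lists by atomically incrementing
// _iterate_discovered_list_id, so the thread that processes list #i need not be
// the thread that discovered the references on it: the lists are private during
// discovery, but fair game for any worker during processing.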

class ShenandoahReferenceProcessorTask : public WorkerTask {
private:
  bool const _concurrent;
  ShenandoahPhaseTimings::Phase const _phase;
  ShenandoahReferenceProcessor* const _reference_processor;

public:
  ShenandoahReferenceProcessorTask(ShenandoahPhaseTimings::Phase phase, bool concurrent, ShenandoahReferenceProcessor* reference_processor) :
    WorkerTask("ShenandoahReferenceProcessorTask"),
    _concurrent(concurrent),
    _phase(phase),
    _reference_processor(reference_processor) {
  }

  virtual void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    }
  }
};

void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {
  Atomic::release_store_fence(&_iterate_discovered_list_id, 0U);

  // Process discovered lists
  ShenandoahReferenceProcessorTask task(phase, concurrent, this);
  workers->run_task(&task);

  // Update SoftReference clock
  soft_reference_update_clock();

  // Collect, log and trace statistics
  collect_statistics();

  enqueue_references(concurrent);
}

void ShenandoahReferenceProcessor::enqueue_references_locked() {
  // Prepend internal pending list to external pending list
  shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);

  // During reference processing, we maintain a local list of references that are identified by
  // _pending_list and _pending_list_tail. _pending_list_tail points to the discovered field of
  // the last Reference object on the local list.
  //
  // There is also a global list of references identified by Universe::_reference_pending_list.
  //
  // The following code has the effect of:
  // 1. Making the global Universe::_reference_pending_list point to my local list
  // 2. Overwriting the discovered field of the last Reference on my local list to point at the
  //    previous head of the global Universe::_reference_pending_list

  oop former_head_of_global_list = Universe::swap_reference_pending_list(_pending_list);
  if (UseCompressedOops) {
    set_oop_field<narrowOop>(reinterpret_cast<narrowOop*>(_pending_list_tail), former_head_of_global_list);
  } else {
    set_oop_field<oop>(reinterpret_cast<oop*>(_pending_list_tail), former_head_of_global_list);
  }
}

void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
  if (_pending_list == nullptr) {
    // Nothing to enqueue
    return;
  }
  if (!concurrent) {
    // When called from mark-compact or degen-GC, the locking is done by the VMOperation.
    enqueue_references_locked();
  } else {
    // Heap_lock protects external pending list
    MonitorLocker ml(Heap_lock);

    enqueue_references_locked();

    // Notify ReferenceHandler thread
    ml.notify_all();
  }

  // Reset internal pending list
  _pending_list = nullptr;
  _pending_list_tail = &_pending_list;
}

template <typename T>
void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
  T discovered = *list;
  while (!CompressedOops::is_null(discovered)) {
    oop discovered_ref = CompressedOops::decode_not_null(discovered);
    set_oop_field<T>(list, oop(nullptr));
    list = reference_discovered_addr<T>(discovered_ref);
    discovered = *list;
  }
}
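
// clean_discovered_list() also copes with a self-looped tail: the tail is visited
// a second time through its own discovered field, and clearing that field then
// breaks the loop and terminates the walk.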

void ShenandoahReferenceProcessor::abandon_partial_discovery() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint index = 0; index < max_workers; index++) {
    if (UseCompressedOops) {
      clean_discovered_list<narrowOop>(_ref_proc_thread_locals[index].discovered_list_addr<narrowOop>());
    } else {
      clean_discovered_list<oop>(_ref_proc_thread_locals[index].discovered_list_addr<oop>());
    }
  }
  if (_pending_list != nullptr) {
    oop pending = _pending_list;
    _pending_list = nullptr;
    if (UseCompressedOops) {
      narrowOop* list = reference_discovered_addr<narrowOop>(pending);
      clean_discovered_list<narrowOop>(list);
    } else {
      oop* list = reference_discovered_addr<oop>(pending);
      clean_discovered_list<oop>(list);
    }
  }
  _pending_list_tail = &_pending_list;
}

void ShenandoahReferenceProcessor::collect_statistics() {
  Counters encountered = {};
  Counters discovered = {};
  Counters enqueued = {};
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    for (size_t type = 0; type < reference_type_count; type++) {
      encountered[type] += _ref_proc_thread_locals[i].encountered((ReferenceType)type);
      discovered[type] += _ref_proc_thread_locals[i].discovered((ReferenceType)type);
      enqueued[type] += _ref_proc_thread_locals[i].enqueued((ReferenceType)type);
    }
  }

  _stats = ReferenceProcessorStats(discovered[REF_SOFT],
                                   discovered[REF_WEAK],
                                   discovered[REF_FINAL],
                                   discovered[REF_PHANTOM]);

  log_info(gc, ref)("Encountered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    encountered[REF_SOFT], encountered[REF_WEAK], encountered[REF_FINAL], encountered[REF_PHANTOM]);
  log_info(gc, ref)("Discovered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    discovered[REF_SOFT], discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]);
  log_info(gc, ref)("Enqueued references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                    enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]);
}