/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif // INCLUDE_JFR

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy       = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy            = NULL;
bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
jlong            ReferenceProcessor::_soft_ref_timestamp_clock           = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
  _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
}

void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!verify_disabled || !_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       uint      mt_processing_degree,
                                       bool      mt_discovery,
                                       uint      mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span                = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1U, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
                                          _max_num_q * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  _discoveredCleanerRefs = &_discoveredPhantomRefs[_max_num_q];

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].set_head(NULL);
    _discovered_refs[i].set_length(0);
  }

  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    warning("time warp: "INT64_FORMAT" to "INT64_FORMAT,
            _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
  size_t total = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    total += lists[i].length();
  }
  return total;
}

ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor,
  GCTimer*                     gc_timer,
  GCId                         gc_id) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  bool trace_time = PrintGCDetails && PrintReferenceGC;

  // Soft references
  size_t soft_count = 0;
  {
    GCTraceTime tt("SoftReference", trace_time, false, gc_timer, gc_id);
    soft_count =
      process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  size_t weak_count = 0;
  {
    GCTraceTime tt("WeakReference", trace_time, false, gc_timer, gc_id);
    weak_count =
      process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  size_t final_count = 0;
  {
    GCTraceTime tt("FinalReference", trace_time, false, gc_timer, gc_id);
    final_count =
      process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  size_t phantom_count = 0;
  {
    GCTraceTime tt("PhantomReference", trace_time, false, gc_timer, gc_id);
    phantom_count =
      process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                                 is_alive, keep_alive, complete_gc, task_executor);

    // Process cleaners, but include them in phantom statistics. We expect
    // Cleaner references to be temporary, and don't want to deal with
    // possible incompatibilities arising from making it more visible.
    phantom_count +=
      process_discovered_reflist(_discoveredCleanerRefs, NULL, true,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK1.2 specification is. See #4126360. Native code can
  // thus use JNI weak references to circumvent the phantom references and
  // resurrect a "post-mortem" object.
  {
    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }

  return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count);
}

#ifndef PRODUCT
// Calculate the number of jni handles.
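// Descriptive note (not in the original header comment): this debug-only
// helper walks the weak JNI handles with a counting closure so that
// process_phaseJNI() below can report how many JNI weak refs are about to
// be processed when reference tracing is enabled.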
uint ReferenceProcessor::count_jni_refs() {
  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  JNIHandles::weak_oops_do(&global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  JFR_ONLY(Jfr::weak_oops_do(is_alive, keep_alive));
  complete_gc->do_void();
}


template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the post-barrier on pending_list_addr missed in
  // enqueue_discovered_reflist.
  oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  //
  // The Java threads will see the Reference objects linked together through
  // the discovered field. Instead of trying to do the write barrier updates
  // in all places in the reference processor where we manipulate the discovered
  // field we make sure to do the barrier here where we anyway iterate through
  // all linked Reference objects. Note that it is important to not dirty any
  // cards during reference processing since this will cause card table
  // verification to fail for G1.
  //
  // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
  // the "next" field is used to chain the pending list, not the discovered
  // field.
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }

  oop obj = NULL;
  oop next_d = refs_list.head();
  if (pending_list_uses_discovered_field()) { // New behavior
    // Walk down the list, self-looping the next field
    // so that the References are not considered active.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               (void *)obj, (void *)next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "Reference not active; should not be discovered");
      // Self-loop next, so as to make Ref not active.
      java_lang_ref_Reference::set_next_raw(obj, obj);
      if (next_d != obj) {
        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
      } else {
        // This is the last object.
        // Swap refs_list into pending_list_addr and
        // set obj's discovered to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
        java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
      }
    }
  } else { // Old behavior
    // Walk down the list, copying the discovered field into
    // the next field and clearing the discovered field.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               (void *)obj, (void *)next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "The reference should not be enqueued");
      if (next_d == obj) {  // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's next to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        if (old == NULL) {
          // obj should be made to point to itself, since
          // pending list was empty.
          java_lang_ref_Reference::set_next(obj, obj);
        } else {
          java_lang_ref_Reference::set_next(obj, old);
        }
      } else {
        java_lang_ref_Reference::set_next(obj, next_d);
      }
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    }
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::number_of_subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discovered_refs,
                           pending_list_addr, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
      _discovered_refs[i].set_head(NULL);
      _discovered_refs[i].set_length(0);
    }
  }
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  oop_store_raw(_prev_next, new_next);
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

// Make the Reference object active again.
void DiscoveredListIterator::make_active() {
  // The pre barrier for G1 is probably just needed for the old
  // reference processing behavior. Should we guard this with
  // ReferenceProcessor::pending_list_uses_discovered_field() ?
  if (UseG1GC) {
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
    if (UseCompressedOops) {
      oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL);
    } else {
      oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
    }
  }
  java_lang_ref_Reference::set_next_raw(_ref, NULL);
}

void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
        "discovered Refs by policy, from list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             (void *)iter.obj(), iter.obj()->klass()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Remember to update the next pointer of the last ref.
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  clear_discovered_references(refs_list);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
    }
    abandon_partial_discovered_list(_discovered_refs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Don't use "refs_list_index" calculated in this way because
    // balance_queues() has moved the Ref's into the first n queues.
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
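// Illustrative note (numbers are hypothetical, not from the original comment):
// with _max_num_q == 4 discovery queues but only _num_q == 2 active workers,
// the entries in queues 2 and 3 are moved into queues 0 and 1, and any queue
// longer than the average is trimmed toward it, so each worker ends up with
// roughly the same amount of work.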
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (uint i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", total_refs);
  }
  size_t avg_refs = total_refs / _num_q + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
  balance_queues(_discoveredCleanerRefs);
}

size_t
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may be different than the number to be used
  // for processing so don't depend on _num_q < _max_num_q as part
  // of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }

  size_t total_list_count = total_count(refs_lists);

  if (PrintReferenceGC && PrintGCDetails) {
    gclog_or_tty->print(", %u refs", total_list_count);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (uint i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }

  return total_list_count;
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discovered_refs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            (void *)iter.obj(), (void *)next, (void *)iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
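    // (Descriptive note: if processing is single-threaded as well, id stays 0,
    //  so every discovered Reference of a given type lands on queue 0.)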
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_CLEANER:
      list = &_discoveredCleanerRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                             (void *)obj, obj->klass()->internal_name());
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             (void *)obj, obj->klass()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 (void *)referent, (void *)obj, da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span.
//     This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             (void *)obj, obj->klass()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    oop_store_raw(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             (void *)obj, obj->klass()->internal_name());
    }
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
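// Descriptive note (not in the original header comment): the YieldClosure
// passed in lets the caller abandon precleaning between individual queues;
// each per-type loop below checks yield->should_return() before precleaning
// the next queue, e.g. so a concurrent collector can give up this work when
// it needs to stop.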
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield,
  GCTimer*           gc_timer,
  GCId               gc_id) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

  // Soft references
  {
    GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }

    // Cleaner references. Included in timing for phantom references. We
    // expect Cleaner references to be temporary, and don't want to deal with
    // possible incompatibilities arising from making it more visible.
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredCleanerRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(uint i) {
  assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
    case 4: return "CleanerRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    clear_discovered_references(_discovered_refs[i]);
  }
}

#endif // PRODUCT