/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/macros.hpp"

const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // Card addr
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

#define __ ideal.
/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive: every reference update needs to log the previous
 * value of the field before the new reference is written.
 *
 * If the previous value is null there is no need to save the old value.
 * References that are null are filtered during runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove at compile time that the reference about to be overwritten is null
 * and avoid adding the barrier code completely.
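 *
 * (Illustrative example: for a Java-level pattern such as
 *
 *   Foo f = new Foo();   // all reference fields of f start out null
 *   f.ref = x;           // the previous value of f.ref is provably null
 *
 * the old value is known to be null at compile time, assuming no earlier
 * store to the same field since the allocation, so there is nothing to log.)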
 *
 * The compiler needs to determine that the object in which a field is about
 * to be written is newly allocated, and that no prior store to the same field
 * has happened since the allocation.
 *
 * Returns true if the pre-barrier can be removed
 */
bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
                                               PhaseValues* phase,
                                               Node* adr,
                                               BasicType bt,
                                               uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == nullptr) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = kit->memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == nullptr) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects. We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base),
                                              phase)) {
        // Success: The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing null so that no previous store
        // has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

// G1 pre/post barriers
void G1BarrierSetC2::pre_barrier(GraphKit* kit,
                                 bool do_load,
                                 Node* ctl,
                                 Node* obj,
                                 Node* adr,
                                 uint alias_idx,
                                 Node* val,
                                 const TypeOopPtr* val_type,
                                 Node* pre_val,
                                 BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != nullptr, "must have a base");
    assert(adr != nullptr, "where are we loading from?");
    assert(pre_val == nullptr, "loaded already?");
    assert(val_type != nullptr, "need a type");

    if (use_ReduceInitialCardMarks()
        && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != nullptr, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int index_offset   = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

  // if (marking != 0)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx, false, MemNode::unordered, LoadNode::Pinned);
    }

    // if (pre_val != nullptr)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
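      // (The SATB queue index counts down, in bytes, from the end of the
      // buffer towards zero; an index of zero means there is no free slot
      // left, in which case the runtime leaf call below is taken to log the
      // value and refresh the buffer.)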
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != nullptr)
  } __ end_if();  // (marking != 0)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

/*
 * G1, like any GC with a young generation, requires a way to keep track of
 * references from the old generation to the young generation to make sure
 * all live objects are found. G1 also needs to track object references
 * between different regions to enable evacuation of old regions, which is
 * done as part of mixed collections. References are tracked in remembered
 * sets, which are continuously updated as references are written, with the
 * help of the post-barrier.
 *
 * To reduce the number of updates to the remembered sets, the post-barrier
 * filters out updates to fields in objects located in the young generation,
 * updates where the object and the written reference are in the same region,
 * stores of null, and writes to cards already marked as dirty by an earlier
 * write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, if it can be proven at compile time that the
 * object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow-path allocation the allocation code must handle the
 * barrier as part of the allocation if the allocated object is not located
 * in the nursery; this happens for humongous objects.
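 *
 * As a rough sketch (mirroring the code in post_barrier() below), the filter
 * emitted by the generated post-barrier is:
 *
 *   if (((obj ^ new_val) >> LogOfHRGrainBytes) != 0  // crosses regions
 *       && new_val != null                           // not storing null
 *       && card(obj) != young_card) {                // not a young region
 *     StoreLoad barrier;
 *     if (card(obj) != dirty_card) {
 *       dirty the card and enqueue its address;
 *     }
 *   }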
 *
 * Returns true if the post barrier can be removed
 */
bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
                                                PhaseValues* phase, Node* store,
                                                Node* adr) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == nullptr) {
    return false; // No allocation found
  }

  // Start search from Store node
  Node* mem = store->in(MemNode::Control);
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode* st_alloc = st_init->allocation();

    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}

//
// Update the card table and add card address to the queue
//
void G1BarrierSetC2::g1_mark_card(GraphKit* kit,
                                  IdealKit& ideal,
                                  Node* card_adr,
                                  Node* oop_store,
                                  uint oop_alias_idx,
                                  Node* index,
                                  Node* index_adr,
                                  Node* buffer,
                                  const TypeFunc* tf) const {
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE STORE.
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  // Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), "write_ref_field_post_entry", card_adr, __ thread());
  } __ end_if();

}

void G1BarrierSetC2::post_barrier(GraphKit* kit,
                                  Node* ctl,
                                  Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint alias_idx,
                                  Node* val,
                                  BasicType bt,
                                  bool use_precise) const {
  // If we are writing a null then we need no post barrier

  if (val != nullptr && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be null
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be null");
    // No post barrier if writing null
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with CardTableBarrierSet::on_slowpath_allocation_exit.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks()
      && g1_can_remove_post_barrier(kit, &kit->gvn(), oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
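  // With imprecise card marks only the card covering the object start is
  // dirtied; the scanning code is expected to process a whole (non-array)
  // instance when it finds that card dirty, so every reference field is
  // still covered.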
  assert(adr != nullptr, "");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float likely = PROB_LIKELY_MAG(3);
  float unlikely = PROB_UNLIKELY_MAG(3);
  Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
  Node* zeroX = __ ConX(0);

  const TypeFunc *tf = write_ref_field_post_entry_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr  = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across safepoint
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift()) );

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset );

  // If we know the value being stored, check whether the store crosses regions.

  if (val != nullptr) {
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare?
    // Node* region_size = __ ConI(1 << G1HeapRegion::LogOfHRGrainBytes);
    Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX, likely); {

      // No barrier if we are storing a null.
      __ if_then(val, BoolTest::ne, kit->null(), likely); {

        // OK, we must mark the card if it is not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card, unlikely); {
          kit->sync_kit(ideal);
          kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
          __ sync_kit(kit);

          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
    // We don't need a barrier here if the destination is a newly allocated object
    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
    // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
    __ if_then(card_val, BoolTest::ne, young_card); {
      g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
    } __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

// Helper that guards and inserts a pre-barrier.
void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                        Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when G1
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile-time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for non-volatile loads from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != nullptr && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset()) {
    // Constant offset but not the reference_offset so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != nullptr) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != nullptr) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->instance_klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::_reference_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset());

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update graphKit memory and control from IdealKit.
      kit->sync_kit(ideal);

      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from graphKit.
      __ sync_kit(kit);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == nullptr
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update graphKit from IdealKit.
        kit->sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        pre_barrier(kit, false /* do_load */,
                    __ ctrl(),
                    nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
                    pre_val /* pre_val */,
                    T_OBJECT);
        if (need_mem_bar) {
          // Add memory barrier to prevent commoning reads from this field
          // across safepoint since GC can change its value.
          kit->insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from graphKit.
        __ sync_kit(kit);

      } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool anonymous = (decorators & C2_UNSAFE_ACCESS) != 0;
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  bool is_mixed = !in_heap && !in_native;
  bool need_cpu_mem_bar = !is_unordered || mismatched || is_mixed;

  Node* top = Compile::current()->top();
  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if G1 is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // Also we need to add a memory barrier to prevent commoning reads
  // from this field across safepoints since GC can change its value.
  bool need_read_barrier = (((on_weak || on_phantom) && !no_keepalive) ||
                            (in_heap && unknown && offset != top && obj != top));

  if (!access.is_oop() || !need_read_barrier) {
    return CardTableBarrierSetC2::load_at_resolved(access, val_type);
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");

  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();
  Node* load;

  Node* control = kit->control();
  const TypePtr* adr_type = access.addr().type();
  MemNode::MemOrd mo = access.mem_node_mo();
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
  // Pinned control dependency is the strictest. So it's ok to substitute it for any other.
  load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
                        LoadNode::Pinned, requires_atomic_access, unaligned, mismatched, unsafe,
                        access.barrier_data());

  if (on_weak || on_phantom) {
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(kit, false /* do_load */,
                kit->control(),
                nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
                load /* pre_val */, T_OBJECT);
    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (CardTableBarrierSetC2::is_gc_barrier_node(node)) {
    return true;
  }
  if (node->Opcode() != Op_CallLeaf) {
    return false;
  }
  CallLeafNode *call = node->as_CallLeaf();
  if (call->_name == nullptr) {
    return false;
  }

  return strcmp(call->_name, "write_ref_field_pre_entry") == 0 || strcmp(call->_name, "write_ref_field_post_entry") == 0;
}

bool G1BarrierSetC2::is_g1_pre_val_load(Node* n) {
  if (n->is_Load() && n->as_Load()->has_pinned_control_dependency()) {
    // Make sure the only users of it are: CmpP, StoreP, and a call to write_ref_field_pre_entry

    // Skip possible decode
    if (n->outcnt() == 1 && n->unique_out()->is_DecodeN()) {
      n = n->unique_out();
    }

    if (n->outcnt() == 3) {
      int found = 0;
      for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
        Node* use = iter.get();
        if (use->is_Cmp() || use->is_Store()) {
          ++found;
        } else if (use->is_CallLeaf()) {
          CallLeafNode* call = use->as_CallLeaf();
          if (strcmp(call->_name, "write_ref_field_pre_entry") == 0) {
            ++found;
          }
        }
      }
      if (found == 3) {
        return true;
      }
    }
  }
  return false;
}

bool G1BarrierSetC2::is_gc_pre_barrier_node(Node *node) const {
  return is_g1_pre_val_load(node);
}

void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  if (is_g1_pre_val_load(node)) {
    macro->replace_node(node, macro->zerocon(node->as_Load()->bottom_type()->basic_type()));
  } else {
    assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
    assert(node->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
    // There could be only one user, the URShift node, in the Object.clone() intrinsic,
    // but then the new allocation is passed to the arraycopy stub and cannot
    // be scalar replaced, so we don't check for that case.

    // Another case with only one user (the Xor) is when the null check of the value
    // in the G1 post barrier is folded after CCP, so the code which used the URShift
    // is removed.

    // Record the Region node before eliminating the post barrier, since the
    // elimination also removes the CastP2X node when it has only one user.
    Node* this_region = node->in(0);
    assert(this_region != nullptr, "");

    // Remove G1 post barrier.

    // Search for the CastP2X->Xor->URShift->Cmp path which checks whether the
    // store is done to a different region than the one holding the value.
    // Replace the Cmp with #0 (false) to collapse the G1 post barrier.
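    // (In the graph this path looks like
    //    CastP2X(obj) -> XorX(CastP2X(val)) -> URShiftX(LogOfHRGrainBytes) -> Cmp(#0) -> Bool(ne)
    // as emitted by post_barrier() above; once the Cmp is constant-folded to
    // "equal", the barrier body is dead and IGVN removes it.)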
    Node* xorx = node->find_out_with(Op_XorX);
    if (xorx != nullptr) {
      Node* shift = xorx->unique_out();
      Node* cmpx = shift->unique_out();
      assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
             cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
             "missing region check in G1 post barrier");
      macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));

      // Remove G1 pre barrier.

      // Search for the "if (marking != 0)" check and set it to "false".
      // There is no G1 pre barrier if the previously stored value is null
      // (for example, after initialization).
      if (this_region->is_Region() && this_region->req() == 3) {
        int ind = 1;
        if (!this_region->in(ind)->is_IfFalse()) {
          ind = 2;
        }
        if (this_region->in(ind)->is_IfFalse() &&
            this_region->in(ind)->in(0)->Opcode() == Op_If) {
          Node* bol = this_region->in(ind)->in(0)->in(1);
          assert(bol->is_Bool(), "");
          cmpx = bol->in(1);
          if (bol->as_Bool()->_test._test == BoolTest::ne &&
              cmpx->is_Cmp() && cmpx->in(2) == macro->intcon(0) &&
              cmpx->in(1)->is_Load()) {
            Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
            const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
            if (adr->is_AddP() && adr->in(AddPNode::Base) == macro->top() &&
                adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(AddPNode::Offset) == macro->MakeConX(marking_offset)) {
              macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
            }
          }
        }
      }
    } else {
      assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
      // This is a G1 post barrier emitted by the Object.clone() intrinsic.
      // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
      // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
      Node* shift = node->find_out_with(Op_URShiftX);
      assert(shift != nullptr, "missing G1 post barrier");
      Node* addp = shift->unique_out();
      Node* load = addp->find_out_with(Op_LoadB);
      assert(load != nullptr, "missing G1 post barrier");
      Node* cmpx = load->unique_out();
      assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
             cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
             "missing card value check in G1 post barrier");
      macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
      // There is no G1 pre barrier in this case
    }
    // Now the CastP2X can be removed since it is used only on a dead path,
    // which is currently still alive until IGVN optimizes it.
    assert(node->outcnt() == 0 || node->unique_out()->Opcode() == Op_URShiftX, "");
    macro->replace_node(node, macro->top());
  }
}

Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (!use_ReduceInitialCardMarks() &&
      c != nullptr && c->is_Region() && c->req() == 3) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != nullptr && c->in(i)->is_Region() &&
          c->in(i)->req() == 3) {
        Node* r = c->in(i);
        for (uint j = 1; j < r->req(); j++) {
          if (r->in(j) != nullptr && r->in(j)->is_Proj() &&
              r->in(j)->in(0) != nullptr &&
              r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
              r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
            Node* call = r->in(j)->in(0);
            c = c->in(i == 1 ? 2 : 1);
            if (c != nullptr && c->Opcode() != Op_Parm) {
              c = c->in(0);
              if (c != nullptr) {
                c = c->in(0);
                assert(call->in(0) == nullptr ||
                       call->in(0)->in(0) == nullptr ||
                       call->in(0)->in(0)->in(0) == nullptr ||
                       call->in(0)->in(0)->in(0)->in(0) == nullptr ||
                       call->in(0)->in(0)->in(0)->in(0)->in(0) == nullptr ||
                       c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
                return c;
              }
            }
          }
        }
      }
    }
  }
  return c;
}

#ifdef ASSERT
bool G1BarrierSetC2::has_cas_in_use_chain(Node *n) const {
  Unique_Node_List visited;
  Node_List worklist;
  worklist.push(n);
  while (worklist.size() > 0) {
    Node* x = worklist.pop();
    if (visited.member(x)) {
      continue;
    } else {
      visited.push(x);
    }

    if (x->is_LoadStore()) {
      int op = x->Opcode();
      if (op == Op_CompareAndExchangeP || op == Op_CompareAndExchangeN ||
          op == Op_CompareAndSwapP     || op == Op_CompareAndSwapN     ||
          op == Op_WeakCompareAndSwapP || op == Op_WeakCompareAndSwapN) {
        return true;
      }
    }
    if (!x->is_CFG()) {
      for (SimpleDUIterator iter(x); iter.has_next(); iter.next()) {
        Node* use = iter.get();
        worklist.push(use);
      }
    }
  }
  return false;
}

void G1BarrierSetC2::verify_pre_load(Node* marking_if, Unique_Node_List& loads /*output*/) const {
  assert(loads.size() == 0, "Loads list should be empty");
  Node* pre_val_if = marking_if->find_out_with(Op_IfTrue)->find_out_with(Op_If);
  if (pre_val_if != nullptr) {
    Unique_Node_List visited;
    Node_List worklist;
    Node* pre_val = pre_val_if->in(1)->in(1)->in(1);

    worklist.push(pre_val);
    while (worklist.size() > 0) {
      Node* x = worklist.pop();
      if (visited.member(x)) {
        continue;
      } else {
        visited.push(x);
      }

      if (has_cas_in_use_chain(x)) {
        loads.clear();
        return;
      }

      if (x->is_Con()) {
        continue;
      }
      if (x->is_EncodeP() || x->is_DecodeN()) {
        worklist.push(x->in(1));
        continue;
      }
      if (x->is_Load() || x->is_LoadStore()) {
        assert(x->in(0) != nullptr, "Pre-val load has to have a control");
        loads.push(x);
        continue;
      }
      if (x->is_Phi()) {
        for (uint i = 1; i < x->req(); i++) {
          worklist.push(x->in(i));
        }
        continue;
      }
      assert(false, "Pre-val anomaly");
    }
  }
}

void G1BarrierSetC2::verify_no_safepoints(Compile* compile, Node* marking_check_if, const Unique_Node_List& loads) const {
  if (loads.size() == 0) {
    return;
  }

  if (loads.size() == 1) { // Handle the typical situation when there is a single pre-value load
                           // that is dominated by the marking_check_if; that is true when the
                           // barrier itself does the pre-val load.
    Node *pre_val = loads.at(0);
    if (pre_val->in(0)->in(0) == marking_check_if) { // IfTrue->If
      return;
    }
  }

  // All other cases are when pre-value loads dominate the marking check.
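  // Walk the control flow backwards from the marking check towards the
  // controls of the pre-value loads and verify that every load control can
  // be reached without crossing a safepoint (a safepoint in between could
  // invalidate the loaded pre-value).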
  Unique_Node_List controls;
  for (uint i = 0; i < loads.size(); i++) {
    Node *c = loads.at(i)->in(0);
    controls.push(c);
  }

  Unique_Node_List visited;
  Unique_Node_List safepoints;
  Node_List worklist;
  uint found = 0;

  worklist.push(marking_check_if);
  while (worklist.size() > 0 && found < controls.size()) {
    Node* x = worklist.pop();
    if (x == nullptr || x == compile->top()) continue;
    if (visited.member(x)) {
      continue;
    } else {
      visited.push(x);
    }

    if (controls.member(x)) {
      found++;
    }
    if (x->is_Region()) {
      for (uint i = 1; i < x->req(); i++) {
        worklist.push(x->in(i));
      }
    } else {
      if (!x->is_SafePoint()) {
        worklist.push(x->in(0));
      } else {
        safepoints.push(x);
      }
    }
  }
  assert(found == controls.size(), "Pre-barrier structure anomaly or possible safepoint");
}

void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (phase != BarrierSetC2::BeforeCodeGen) {
    return;
  }
  // Verify G1 pre-barriers
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());

  Unique_Node_List visited;
  Node_List worklist;
  // We're going to walk control flow backwards starting from the Root
  worklist.push(compile->root());
  while (worklist.size() > 0) {
    Node* x = worklist.pop();
    if (x == nullptr || x == compile->top()) continue;
    if (visited.member(x)) {
      continue;
    } else {
      visited.push(x);
    }

    if (x->is_Region()) {
      for (uint i = 1; i < x->req(); i++) {
        worklist.push(x->in(i));
      }
    } else {
      worklist.push(x->in(0));
      // We are looking for the pattern:
      //                            /->ThreadLocal
      // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
      //              \->ConI(0)
      // We want to verify that the If and the LoadB have the same control
      // See GraphKit::g1_write_barrier_pre()
      if (x->is_If()) {
        IfNode *iff = x->as_If();
        if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
          CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
          if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
              && cmp->in(1)->is_Load()) {
            LoadNode* load = cmp->in(1)->as_Load();
            if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                && load->in(2)->in(3)->is_Con()
                && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

              Node* if_ctrl = iff->in(0);
              Node* load_ctrl = load->in(0);

              if (if_ctrl != load_ctrl) {
                // Skip possible CProj->NeverBranch in infinite loops
                if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                    && if_ctrl->in(0)->is_NeverBranch()) {
                  if_ctrl = if_ctrl->in(0)->in(0);
                }
              }
              assert(load_ctrl != nullptr && if_ctrl == load_ctrl, "controls must match");

              Unique_Node_List loads;
              verify_pre_load(iff, loads);
              verify_no_safepoints(compile, iff, loads);
            }
          }
        }
      }
    }
  }
}
#endif

bool G1BarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  if (opcode == Op_StoreP) {
    Node* adr = n->in(MemNode::Address);
    const Type* adr_type = gvn->type(adr);
    // Pointer stores emitted by the G1 barriers look like unsafe accesses.
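    // (These are raw-pointer stores of the previous value or the card address
    // into the thread-local SATB / dirty card queue buffers, not stores into
    // the Java heap.)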
    // Ignore such stores so that non-escaping allocations can still be
    // scalar replaced.
    if (adr_type->isa_rawptr() && adr->is_AddP()) {
      Node* base = conn_graph->get_addp_base(adr);
      if (base->Opcode() == Op_LoadP &&
          base->in(MemNode::Address)->is_AddP()) {
        adr = base->in(MemNode::Address);
        Node* tls = conn_graph->get_addp_base(adr);
        if (tls->Opcode() == Op_ThreadLocal) {
          int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
          if (offs == buf_offset) {
            return true; // G1 pre barrier previous oop value store.
          }
          if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
            return true; // G1 post barrier card address store.
          }
        }
      }
    }
  }
  return false;
}