7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "gc/g1/c2/g1BarrierSetC2.hpp"
28 #include "gc/g1/g1BarrierSet.hpp"
29 #include "gc/g1/g1BarrierSetRuntime.hpp"
30 #include "gc/g1/g1CardTable.hpp"
31 #include "gc/g1/g1ThreadLocalData.hpp"
32 #include "gc/g1/g1HeapRegion.hpp"
33 #include "opto/arraycopynode.hpp"
34 #include "opto/compile.hpp"
35 #include "opto/escape.hpp"
36 #include "opto/graphKit.hpp"
37 #include "opto/idealKit.hpp"
38 #include "opto/macro.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/type.hpp"
41 #include "utilities/macros.hpp"
42
43 const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
44 const Type **fields = TypeTuple::fields(2);
45 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
46 fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
423
424 // Offsets into the thread
425 const int index_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
426 const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
427
428 // Pointers into the thread
429
430 Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
431 Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
432
433 // Now some values
434 // Use ctrl to avoid hoisting these values past a safepoint, which could
435 // potentially reset these fields in the JavaThread.
436 Node* index = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
437 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
438
439 // Convert the store obj pointer to an int prior to doing math on it
440 // Must use ctrl to prevent "integerized oop" existing across safepoint
441 Node* cast = __ CastPX(__ ctrl(), adr);
442
443 // Divide pointer by card size
444 Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift()) );
445
446 // Combine card table base and card offset
447 Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset );
448
449 // If we know the value being stored, check whether the store crosses regions.
450
451 if (val != nullptr) {
452 // Does the store cause us to cross regions?
453
454 // Should be able to do an unsigned compare of region_size instead of
455 // an extra shift. Do we have an unsigned compare??
456 // Node* region_size = __ ConI(1 << G1HeapRegion::LogOfHRGrainBytes);
457 Node* xor_res = __ URShiftX ( __ XorX( cast, __ CastPX(__ ctrl(), val)), __ ConI(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)));
458
459 // if (xor_res == 0) same region so skip
460 __ if_then(xor_res, BoolTest::ne, zeroX, likely); {
461
462 // No barrier if we are storing a null.
463 __ if_then(val, BoolTest::ne, kit->null(), likely); {
464
465 // Ok must mark the card if not already dirty
466
467 // load the original value of the card
468 Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
469
470 __ if_then(card_val, BoolTest::ne, young_card, unlikely); {
471 kit->sync_kit(ideal);
472 kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
473 __ sync_kit(kit);
474
475 Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
476 __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
477 g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
478 } __ end_if();
|
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/javaClasses.hpp"
27 #if INCLUDE_CDS
28 #include "code/SCCache.hpp"
29 #endif
30 #include "gc/g1/c2/g1BarrierSetC2.hpp"
31 #include "gc/g1/g1BarrierSet.hpp"
32 #include "gc/g1/g1BarrierSetRuntime.hpp"
33 #include "gc/g1/g1CardTable.hpp"
34 #include "gc/g1/g1ThreadLocalData.hpp"
35 #include "gc/g1/g1HeapRegion.hpp"
36 #include "opto/arraycopynode.hpp"
37 #include "opto/compile.hpp"
38 #include "opto/escape.hpp"
39 #include "opto/graphKit.hpp"
40 #include "opto/idealKit.hpp"
41 #include "opto/macro.hpp"
42 #include "opto/rootnode.hpp"
43 #include "opto/type.hpp"
44 #include "utilities/macros.hpp"
45
46 const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
47 const Type **fields = TypeTuple::fields(2);
48 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
49 fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
426
427 // Offsets into the thread
428 const int index_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
429 const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
430
431 // Pointers into the thread
432
433 Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
434 Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
435
436 // Now some values
437 // Use ctrl to avoid hoisting these values past a safepoint, which could
438 // potentially reset these fields in the JavaThread.
439 Node* index = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
440 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
441
442 // Convert the store obj pointer to an int prior to doing math on it
443 // Must use ctrl to prevent "integerized oop" existing across safepoint
444 Node* cast = __ CastPX(__ ctrl(), adr);
445
446 Node* card_shift;
447 #if INCLUDE_CDS
448 if (SCCache::is_on_for_write()) {
449 // load the card shift from the AOT Runtime Constants area
450 Node* card_shift_adr = __ makecon(TypeRawPtr::make(AOTRuntimeConstants::card_shift_address()));
451 card_shift = __ load(__ ctrl(), card_shift_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
452 } else
453 #endif
454 {
455 card_shift = __ ConI(CardTable::card_shift());
456 }
457 // Divide pointer by card size
458 Node* card_offset = __ URShiftX( cast, card_shift );
459
460 // Combine card table base and card offset
461 Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset );
462
463 // If we know the value being stored, check whether the store crosses regions.
464
465 if (val != nullptr) {
466 // Does the store cause us to cross regions?
467
468 // Should be able to do an unsigned compare of region_size instead of
469 // an extra shift. Do we have an unsigned compare??
470 // Node* region_size = __ ConI(1 << G1HeapRegion::LogOfHRGrainBytes);
471 #if INCLUDE_CDS
472 Node* xor_res = __ XorX( cast, __ CastPX(__ ctrl(), val));
473 if (SCCache::is_on_for_write()) {
474 // load the grain shift from the AOT Runtime Constants area
475 Node* grain_shift_adr = __ makecon(TypeRawPtr::make(AOTRuntimeConstants::grain_shift_address()));
476 Node* grain_shift = __ load(__ ctrl(), grain_shift_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
477 xor_res = __ URShiftX( xor_res, grain_shift);
478 } else {
479 xor_res = __ URShiftX ( xor_res, __ ConI(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)));
480 }
481 #else
482 Node* xor_res = __ URShiftX ( __ XorX( cast, __ CastPX(__ ctrl(), val)), __ ConI(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)));
483 #endif
484 // if (xor_res == 0) same region so skip
485 __ if_then(xor_res, BoolTest::ne, zeroX, likely); {
486
487 // No barrier if we are storing a null.
488 __ if_then(val, BoolTest::ne, kit->null(), likely); {
489
490 // Ok must mark the card if not already dirty
491
492 // load the original value of the card
493 Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
494
495 __ if_then(card_val, BoolTest::ne, young_card, unlikely); {
496 kit->sync_kit(ideal);
497 kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
498 __ sync_kit(kit);
499
500 Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
501 __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
502 g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
503 } __ end_if();
|