107 // no_ctrl, but that doesn't buy much latitude.
108 Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
109 __ if_then(card_val, BoolTest::ne, dirty);
110 }
111
112 // Smash dirty value into card
113 __ store(__ ctrl(), card_adr, dirty, T_BYTE, adr_type, MemNode::unordered);
114
115 if (UseCondCardMark) {
116 __ end_if();
117 }
118
119 // Final sync IdealKit and GraphKit.
120 kit->final_sync(ideal);
121 }
122
123 bool CardTableBarrierSetC2::use_ReduceInitialCardMarks() {
124 return ReduceInitialCardMarks;
125 }
126
// Remove a card-mark barrier rooted at a CastP2X node during macro expansion.
// Expected shape: CastP2X -> shift (card index) -> AddP (card address) ->
// memory node(s); unique_out() asserts exactly one user at each step.
void CardTableBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  Node *shift = node->unique_out();
  Node *addp = shift->unique_out();
  // Walk the card address's users from the back so replace_node() may
  // delete outs while we iterate.
  for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
    Node *mem = addp->last_out(j);
    if (UseCondCardMark && mem->is_Load()) {
      assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
      // The load is checking if the card has been written so
      // replace it with zero to fold the test.
      macro->replace_node(mem, macro->intcon(0));
      continue;
    }
    assert(mem->is_Store(), "store required");
    // Elide the card-mark store: route its users to its memory input.
    macro->replace_node(mem, mem->in(MemNode::Memory));
  }
}
144
145 bool CardTableBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
146 bool is_oop = is_reference_type(type);
147 return is_oop && (!tightly_coupled_alloc || !use_ReduceInitialCardMarks());
148 }
|
107 // no_ctrl, but that doesn't buy much latitude.
108 Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
109 __ if_then(card_val, BoolTest::ne, dirty);
110 }
111
112 // Smash dirty value into card
113 __ store(__ ctrl(), card_adr, dirty, T_BYTE, adr_type, MemNode::unordered);
114
115 if (UseCondCardMark) {
116 __ end_if();
117 }
118
119 // Final sync IdealKit and GraphKit.
120 kit->final_sync(ideal);
121 }
122
123 bool CardTableBarrierSetC2::use_ReduceInitialCardMarks() {
124 return ReduceInitialCardMarks;
125 }
126
// Remove a card-mark barrier rooted at a CastP2X node during IGVN.
// Unlike the macro-expansion variant, this walks *all* users at each level
// (shift -> AddP -> memory node) instead of assuming a unique out chain.
void CardTableBarrierSetC2::eliminate_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  // All loops iterate from the back so replace_node() may delete outs
  // while we walk them.
  for (DUIterator_Last imin, i = node->last_outs(imin); i >= imin; --i) {
    Node* shift = node->last_out(i);
    for (DUIterator_Last jmin, j = shift->last_outs(jmin); j >= jmin; --j) {
      Node* addp = shift->last_out(j);
      for (DUIterator_Last kmin, k = addp->last_outs(kmin); k >= kmin; --k) {
        Node* mem = addp->last_out(k);
        if (UseCondCardMark && mem->is_Load()) {
          assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
          // The load is checking if the card has been written so
          // replace it with zero to fold the test.
          igvn->replace_node(mem, igvn->intcon(0));
          continue;
        }
        assert(mem->is_Store(), "store required");
        // Elide the card-mark store: route its users to its memory input.
        igvn->replace_node(mem, mem->in(MemNode::Memory));
      }
    }
  }
}
148
149 bool CardTableBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
150 bool is_oop = type == T_OBJECT || type == T_ARRAY;
151 return is_oop && (!tightly_coupled_alloc || !use_ReduceInitialCardMarks());
152 }
|