  return progress;
}

//------------------------------Value-----------------------------------------
const Type* MulNode::Value(PhaseGVN* phase) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Either input is ZERO ==> the result is ZERO.
  // Not valid for floats or doubles since +0.0 * -0.0 --> -0.0
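  // (In IEEE 754 a zero times a negative value is -0.0, and a zero times NaN
  // or infinity is NaN, so a constant zero input does not force a zero result
  // for FP multiplies.)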
  int op = Opcode();
  if( op == Op_MulI || op == Op_AndI || op == Op_MulL || op == Op_AndL ) {
    const Type *zero = add_id();        // The multiplicative zero
    if( t1->higher_equal( zero ) ) return zero;
    if( t2->higher_equal( zero ) ) return zero;
  }

  // Code pattern on return from a call that returns an __Value.  Can
  // be optimized away if the return value turns out to be an oop.
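  // An oop is aligned to at least MinObjAlignmentInBytes, so the low
  // alignment bits of CastP2X(oop) are known to be zero; AND'ing them with a
  // mask whose value range fits inside MinObjAlignmentInBytesMask therefore
  // folds to zero (the add_id of this node).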
  if (op == Op_AndX &&
      in(1) != nullptr &&
      in(1)->Opcode() == Op_CastP2X &&
      in(1)->in(1) != nullptr &&
      phase->type(in(1)->in(1))->isa_oopptr() &&
      t2->isa_intptr_t() != nullptr &&
      t2->isa_intptr_t()->_lo >= 0 &&
      t2->isa_intptr_t()->_hi <= MinObjAlignmentInBytesMask) {
    return add_id();
  }

  // Either input is BOTTOM ==> the result is the local BOTTOM
  if( t1 == Type::BOTTOM || t2 == Type::BOTTOM )
    return bottom_type();

#if defined(IA32)
  // Can't trust native compilers to properly fold strict double
  // multiplication with round-to-zero on this platform.
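  // (Likely a consequence of x87 arithmetic: intermediate results are kept in
  // 80-bit extended precision, so a folded product can round differently than
  // a strict IEEE 754 double multiply.)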
  if (op == Op_MulD) {
    return TypeD::DOUBLE;
  }
#endif

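  // mul_ring() is supplied by each concrete subclass and combines the two
  // input value ranges; e.g. MulINode multiplies the interval bounds, while
  // the And nodes can narrow the result using a constant mask.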
  return mul_ring(t1,t2);            // Local flavor of type multiplication
}

MulNode* MulNode::make(Node* in1, Node* in2, BasicType bt) {
  switch (bt) {
    case T_INT:
      return new MulINode(in1, in2);
    case T_LONG:
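// ... (remaining MulNode::make cases and the intervening node implementations
// are omitted; the excerpt resumes in the middle of AndLNode::Ideal) ...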
    return new ConvI2LNode(andi);
  }

  // Masking off sign bits?  Don't make them!
  if (op == Op_RShiftL) {
    const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
    if( t12 && t12->is_con() ) { // Shift is by a constant
      int shift = t12->get_con();
      shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
      const julong sign_bits_mask = ~(((julong)CONST64(1) << (julong)(BitsPerJavaLong - shift)) - 1);
      // If the two masks have no bits in common, only the originally shifted
      // bits survive: no sign-extension bits make it through the masking.
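      // For example (illustrative): with shift == 56 and mask == 0xFF,
      //   (x >> 56) & 0xFF  ==>  (x >>> 56) & 0xFF
      // because the sign-extension bits occupy bits 63..8, none of which are
      // selected by 0xFF.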
      if( (sign_bits_mask & mask) == 0 ) {
        // Use zero-fill shift instead
        Node *zshift = phase->transform(new URShiftLNode(in1->in(1), in1->in(2)));
        return new AndLNode(zshift, in(2));
      }
    }
  }

  // Search for GraphKit::mark_word_test patterns and fold the test if the result is statically known
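  // in(1) is either a load of the object's mark word or a Phi merging that
  // load with a load of the klass' prototype header. If the address type
  // already decides the property being tested (inline type, null-free array,
  // flat array), fold the AND: replacing in(1) with the mask makes the test
  // statically true (mask & mask == mask), while replacing it with a zero
  // constant makes it statically false.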
  Node* load1 = in(1);
  Node* load2 = nullptr;
  if (load1->is_Phi() && phase->type(load1)->isa_long()) {
    load1 = in(1)->in(1);
    load2 = in(1)->in(2);
  }
  if (load1 != nullptr && load1->is_Load() && phase->type(load1)->isa_long() &&
      (load2 == nullptr || (load2->is_Load() && phase->type(load2)->isa_long()))) {
    const TypePtr* adr_t1 = phase->type(load1->in(MemNode::Address))->isa_ptr();
    const TypePtr* adr_t2 = (load2 != nullptr) ? phase->type(load2->in(MemNode::Address))->isa_ptr() : nullptr;
    if (adr_t1 != nullptr && adr_t1->offset() == oopDesc::mark_offset_in_bytes() &&
        (load2 == nullptr || (adr_t2 != nullptr && adr_t2->offset() == in_bytes(Klass::prototype_header_offset())))) {
      if (mask == markWord::inline_type_pattern) {
        if (adr_t1->is_inlinetypeptr()) {
          set_req_X(1, in(2), phase);
          return this;
        } else if (!adr_t1->can_be_inline_type()) {
          set_req_X(1, phase->longcon(0), phase);
          return this;
        }
      } else if (mask == markWord::null_free_array_bit_in_place) {
        if (adr_t1->is_null_free()) {
          set_req_X(1, in(2), phase);
          return this;
        } else if (adr_t1->is_not_null_free()) {
          set_req_X(1, phase->longcon(0), phase);
          return this;
        }
      } else if (mask == markWord::flat_array_bit_in_place) {
        if (adr_t1->is_flat()) {
          set_req_X(1, in(2), phase);
          return this;
        } else if (adr_t1->is_not_flat()) {
          set_req_X(1, phase->longcon(0), phase);
          return this;
        }
      }
    }
  }

  return MulNode::Ideal(phase, can_reshape);
}

LShiftNode* LShiftNode::make(Node* in1, Node* in2, BasicType bt) {
  switch (bt) {
    case T_INT:
      return new LShiftINode(in1, in2);
    case T_LONG:
      return new LShiftLNode(in1, in2);
    default:
      fatal("Not implemented for %s", type2name(bt));
  }
  return nullptr;
}

//=============================================================================

static bool const_shift_count(PhaseGVN* phase, Node* shiftNode, int* count) {
  const TypeInt* tcount = phase->type(shiftNode->in(2))->isa_int();
  if (tcount != nullptr && tcount->is_con()) {