src/hotspot/share/opto/library_call.cpp

*** 50,59 ****
--- 50,60 ----
  #include "opto/opaquenode.hpp"
  #include "opto/parse.hpp"
  #include "opto/runtime.hpp"
  #include "opto/rootnode.hpp"
  #include "opto/subnode.hpp"
+ #include "opto/valuetypenode.hpp"
  #include "prims/nativeLookup.hpp"
  #include "prims/unsafe.hpp"
  #include "runtime/objectMonitor.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "utilities/macros.hpp"
*** 161,171 ****
                                     Node* array_length,
                                     RegionNode* region);
    void generate_string_range_check(Node* array, Node* offset,
                                     Node* length, bool char_count);
    Node* generate_current_thread(Node* &tls_output);
-   Node* load_mirror_from_klass(Node* klass);
    Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                        RegionNode* region, int null_path,
                                        int offset);
    Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
                                 RegionNode* region, int null_path) {
--- 162,171 ----
*** 183,206 ****
    }
    Node* generate_access_flags_guard(Node* kls,
                                      int modifier_mask, int modifier_bits,
                                      RegionNode* region);
    Node* generate_interface_guard(Node* kls, RegionNode* region);
    Node* generate_array_guard(Node* kls, RegionNode* region) {
!     return generate_array_guard_common(kls, region, false, false);
    }
    Node* generate_non_array_guard(Node* kls, RegionNode* region) {
!     return generate_array_guard_common(kls, region, false, true);
    }
    Node* generate_objArray_guard(Node* kls, RegionNode* region) {
!     return generate_array_guard_common(kls, region, true, false);
    }
    Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
!     return generate_array_guard_common(kls, region, true, true);
    }
!   Node* generate_array_guard_common(Node* kls, RegionNode* region,
!                                     bool obj_array, bool not_array);
    Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
    CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                       bool is_virtual = false, bool is_static = false);
    CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
      return generate_method_call(method_id, false, true);
--- 183,223 ----
    }
    Node* generate_access_flags_guard(Node* kls,
                                      int modifier_mask, int modifier_bits,
                                      RegionNode* region);
    Node* generate_interface_guard(Node* kls, RegionNode* region);
+   Node* generate_value_guard(Node* kls, RegionNode* region);
+ 
+   enum ArrayKind {
+     AnyArray,
+     NonArray,
+     ObjectArray,
+     NonObjectArray,
+     TypeArray,
+     ValueArray
+   };
+ 
    Node* generate_array_guard(Node* kls, RegionNode* region) {
!     return generate_array_guard_common(kls, region, AnyArray);
    }
    Node* generate_non_array_guard(Node* kls, RegionNode* region) {
!     return generate_array_guard_common(kls, region, NonArray);
    }
    Node* generate_objArray_guard(Node* kls, RegionNode* region) {
!     return generate_array_guard_common(kls, region, ObjectArray);
    }
    Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
!     return generate_array_guard_common(kls, region, NonObjectArray);
!   }
!   Node* generate_typeArray_guard(Node* kls, RegionNode* region) {
!     return generate_array_guard_common(kls, region, TypeArray);
    }
!   Node* generate_valueArray_guard(Node* kls, RegionNode* region) {
!     assert(ValueArrayFlatten, "can never be flattened");
!     return generate_array_guard_common(kls, region, ValueArray);
!   }
!   Node* generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind);
    Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
    CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                       bool is_virtual = false, bool is_static = false);
    CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
      return generate_method_call(method_id, false, true);
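A reader's aid for the hunk above: the old (obj_array, not_array) flag pairs map one-for-one onto the new ArrayKind values at the call sites shown. The helper below is a hypothetical translation written for illustration, not part of the patch, and it assumes the ArrayKind enum is accessible from the caller.

  // Hypothetical helper (illustration only): old flag pair -> new ArrayKind.
  // The mapping is read off the changed call sites in the hunk above.
  static LibraryCallKit::ArrayKind array_kind_from_flags(bool obj_array, bool not_array) {
    if (obj_array) {
      return not_array ? LibraryCallKit::NonObjectArray : LibraryCallKit::ObjectArray;
    }
    return not_array ? LibraryCallKit::NonArray : LibraryCallKit::AnyArray;
  }
  // TypeArray and ValueArray are new kinds with no old-flag equivalent.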
*** 252,270 ****
--- 269,290 ----
    bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
    static bool klass_needs_init_guard(Node* kls);
    bool inline_unsafe_allocate();
    bool inline_unsafe_newArray(bool uninitialized);
    bool inline_unsafe_copyMemory();
+   bool inline_unsafe_make_private_buffer();
+   bool inline_unsafe_finish_private_buffer();
    bool inline_native_currentThread();
    bool inline_native_time_funcs(address method, const char* funcName);
  #ifdef JFR_HAVE_INTRINSICS
    bool inline_native_classID();
    bool inline_native_getEventWriter();
  #endif
    bool inline_native_isInterrupted();
    bool inline_native_Class_query(vmIntrinsics::ID id);
+   bool inline_value_Class_conversion(vmIntrinsics::ID id);
    bool inline_native_subtype_check();
    bool inline_native_getLength();
    bool inline_array_copyOf(bool is_copyOfRange);
    bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
    bool inline_preconditions_checkIndex();
*** 591,619 ****
--- 611,643 ----
    case vmIntrinsics::_compressStringC:
    case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
    case vmIntrinsics::_inflateStringC:
    case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
+   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
+   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();

    case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
    case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
    case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
    case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
    case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
    case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
    case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
    case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
    case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
+   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_VALUETYPE,Relaxed, false);

    case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
    case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
    case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
    case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
    case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
    case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
    case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
    case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
    case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
+   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_VALUETYPE,Relaxed, false);

    case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
    case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
    case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
    case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
*** 774,783 ****
--- 798,810 ----
    case vmIntrinsics::_isArray:
    case vmIntrinsics::_isPrimitive:
    case vmIntrinsics::_getSuperclass:
    case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
+   case vmIntrinsics::_asPrimaryType:
+   case vmIntrinsics::_asIndirectType:           return inline_value_Class_conversion(intrinsic_id());
+ 
    case vmIntrinsics::_floatToRawIntBits:
    case vmIntrinsics::_floatToIntBits:
    case vmIntrinsics::_intBitsToFloat:
    case vmIntrinsics::_doubleToRawLongBits:
    case vmIntrinsics::_doubleToLongBits:
*** 2382,2403 ****
    ciSignature* sig = callee()->signature();
  #ifdef ASSERT
    if (!is_store) {
      // Object getReference(Object base, int/long offset), etc.
      BasicType rtype = sig->return_type()->basic_type();
!     assert(rtype == type, "getter must return the expected value");
!     assert(sig->count() == 2, "oop getter has 2 arguments");
      assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
      assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
    } else {
      // void putReference(Object base, int/long offset, Object x), etc.
      assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
!     assert(sig->count() == 3, "oop putter has 3 arguments");
      assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
      assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
      BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
!     assert(vtype == type, "putter must accept the expected value");
    }
  #endif // ASSERT

  }
  #endif //PRODUCT
--- 2409,2430 ----
    ciSignature* sig = callee()->signature();
  #ifdef ASSERT
    if (!is_store) {
      // Object getReference(Object base, int/long offset), etc.
      BasicType rtype = sig->return_type()->basic_type();
!     assert(rtype == type || (rtype == T_OBJECT && type == T_VALUETYPE), "getter must return the expected value");
!     assert(sig->count() == 2 || (type == T_VALUETYPE && sig->count() == 3), "oop getter has 2 or 3 arguments");
      assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
      assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
    } else {
      // void putReference(Object base, int/long offset, Object x), etc.
      assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
!     assert(sig->count() == 3 || (type == T_VALUETYPE && sig->count() == 4), "oop putter has 3 arguments");
      assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
      assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
      BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
!     assert(vtype == type || (type == T_VALUETYPE && vtype == T_OBJECT), "putter must accept the expected value");
    }
  #endif // ASSERT

  }
  #endif //PRODUCT
*** 2418,2445 ****
    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
    // to be plain byte offsets, which are also the same as those accepted
    // by oopDesc::field_addr.
    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
           "fieldOffset must be byte-scaled");

    // 32-bit machines ignore the high half!
    offset = ConvL2X(offset);
    adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);

    if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
      heap_base_oop = base;
!   } else if (type == T_OBJECT) {
      return false; // off-heap oop accesses are not supported
    }

    // Can base be NULL? Otherwise, always on-heap access.
    bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));

    if (!can_access_non_heap) {
      decorators |= IN_HEAP;
    }

!   val = is_store ? argument(4) : NULL;

    const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
    if (adr_type == TypePtr::NULL_PTR) {
      return false; // off-heap access with zero address
    }
--- 2445,2532 ----
    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
    // to be plain byte offsets, which are also the same as those accepted
    // by oopDesc::field_addr.
    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
           "fieldOffset must be byte-scaled");

+   ciValueKlass* value_klass = NULL;
+   if (type == T_VALUETYPE) {
+     Node* cls = null_check(argument(4));
+     if (stopped()) {
+       return true;
+     }
+     Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
+     const TypeKlassPtr* kls_t = _gvn.type(kls)->isa_klassptr();
+     if (!kls_t->klass_is_exact()) {
+       return false;
+     }
+     ciKlass* klass = kls_t->klass();
+     if (!klass->is_valuetype()) {
+       return false;
+     }
+     value_klass = klass->as_value_klass();
+   }
+ 
+   receiver = null_check(receiver);
+   if (stopped()) {
+     return true;
+   }
+ 
+   if (base->is_ValueType()) {
+     ValueTypeNode* vt = base->as_ValueType();
+ 
+     if (is_store) {
+       if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) {
+         return false;
+       }
+       base = vt->get_oop();
+     } else {
+       if (offset->is_Con()) {
+         long off = find_long_con(offset, 0);
+         ciValueKlass* vk = vt->type()->value_klass();
+         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
+           return false;
+         }
+ 
+         ciField* f = vk->get_non_flattened_field_by_offset((int)off);
+ 
+         if (f != NULL) {
+           BasicType bt = f->layout_type();
+           if (bt == T_ARRAY || bt == T_NARROWOOP) {
+             bt = T_OBJECT;
+           }
+           if (bt == type) {
+             if (bt != T_VALUETYPE || f->type() == value_klass) {
+               set_result(vt->field_value_by_offset((int)off, false));
+               return true;
+             }
+           }
+         }
+       }
+       vt = vt->allocate(this)->as_ValueType();
+       base = vt->get_oop();
+     }
+   }
+ 
    // 32-bit machines ignore the high half!
    offset = ConvL2X(offset);
    adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);

    if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
      heap_base_oop = base;
!   } else if (type == T_OBJECT || (value_klass != NULL && value_klass->has_object_fields())) {
      return false; // off-heap oop accesses are not supported
    }

    // Can base be NULL? Otherwise, always on-heap access.
    bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));

    if (!can_access_non_heap) {
      decorators |= IN_HEAP;
    }

!   val = is_store ? argument(4 + (type == T_VALUETYPE ? 1 : 0)) : NULL;

    const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
    if (adr_type == TypePtr::NULL_PTR) {
      return false; // off-heap access with zero address
    }
*** 2452,2462 ****
        alias_type->adr_type() == TypeAryPtr::RANGE) {
      return false; // not supported
    }

    bool mismatched = false;
!   BasicType bt = alias_type->basic_type();
    if (bt != T_ILLEGAL) {
      assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
      if (bt == T_BYTE && adr_type->isa_aryptr()) {
        // Alias type doesn't differentiate between byte[] and boolean[]).
        // Use address type to get the element type.
--- 2539,2573 ----
        alias_type->adr_type() == TypeAryPtr::RANGE) {
      return false; // not supported
    }

    bool mismatched = false;
!   BasicType bt = T_ILLEGAL;
!   ciField* field = NULL;
!   if (adr_type->isa_instptr()) {
!     const TypeInstPtr* instptr = adr_type->is_instptr();
!     ciInstanceKlass* k = instptr->klass()->as_instance_klass();
!     int off = instptr->offset();
!     if (instptr->const_oop() != NULL &&
!         instptr->klass() == ciEnv::current()->Class_klass() &&
!         instptr->offset() >= (instptr->klass()->as_instance_klass()->size_helper() * wordSize)) {
!       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
!       field = k->get_field_by_offset(off, true);
!     } else {
!       field = k->get_non_flattened_field_by_offset(off);
!     }
!     if (field != NULL) {
!       bt = field->layout_type();
!     }
!     assert(bt == alias_type->basic_type() || bt == T_VALUETYPE, "should match");
!     if (field != NULL && bt == T_VALUETYPE && !field->is_flattened()) {
!       bt = T_OBJECT;
!     }
!   } else {
!     bt = alias_type->basic_type();
!   }
! 
    if (bt != T_ILLEGAL) {
      assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
      if (bt == T_BYTE && adr_type->isa_aryptr()) {
        // Alias type doesn't differentiate between byte[] and boolean[]).
        // Use address type to get the element type.
*** 2473,2482 ****
--- 2584,2614 ----
      mismatched = (bt != type);
    } else if (alias_type->adr_type()->isa_oopptr()) {
      mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
    }

+   if (type == T_VALUETYPE) {
+     if (adr_type->isa_instptr()) {
+       if (field == NULL || field->type() != value_klass) {
+         mismatched = true;
+       }
+     } else if (adr_type->isa_aryptr()) {
+       const Type* elem = adr_type->is_aryptr()->elem();
+       if (!elem->isa_valuetype()) {
+         mismatched = true;
+       } else if (elem->value_klass() != value_klass) {
+         mismatched = true;
+       }
+     }
+     if (is_store) {
+       const Type* val_t = _gvn.type(val);
+       if (!val_t->isa_valuetype() || val_t->value_klass() != value_klass) {
+         return false;
+       }
+     }
+   }
+ 
    assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");

    if (mismatched) {
      decorators |= C2_MISMATCHED;
    }
*** 2485,2521 ****
    const Type *value_type = Type::get_const_basic_type(type);

    // Figure out the memory ordering.
    decorators |= mo_decorator_for_access_kind(kind);

!   if (!is_store && type == T_OBJECT) {
!     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
!     if (tjp != NULL) {
!       value_type = tjp;
      }
    }

-   receiver = null_check(receiver);
-   if (stopped()) {
-     return true;
-   }

    // Heap pointers get a null-check from the interpreter,
    // as a courtesy.  However, this is not guaranteed by Unsafe,
    // and it is not possible to fully distinguish unintended nulls
    // from intended ones in this API.

    if (!is_store) {
      Node* p = NULL;
      // Try to constant fold a load from a constant field
!     ciField* field = alias_type->field();
      if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
        // final or stable field
        p = make_constant_from_field(field, heap_base_oop);
      }

      if (p == NULL) { // Could not constant fold the load
!       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
        // Normalize the value returned by getBoolean in the following cases
        if (type == T_BOOLEAN &&
            (mismatched ||
             heap_base_oop == top() ||                  // - heap_base_oop is NULL or
             (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
--- 2617,2663 ----
    const Type *value_type = Type::get_const_basic_type(type);

    // Figure out the memory ordering.
    decorators |= mo_decorator_for_access_kind(kind);

!   if (!is_store) {
!     if (type == T_OBJECT) {
!       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
!       if (tjp != NULL) {
!         value_type = tjp;
!       }
!     } else if (type == T_VALUETYPE) {
!       value_type = NULL;
      }
    }

    // Heap pointers get a null-check from the interpreter,
    // as a courtesy.  However, this is not guaranteed by Unsafe,
    // and it is not possible to fully distinguish unintended nulls
    // from intended ones in this API.

    if (!is_store) {
      Node* p = NULL;
      // Try to constant fold a load from a constant field
!     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
        // final or stable field
        p = make_constant_from_field(field, heap_base_oop);
      }

      if (p == NULL) { // Could not constant fold the load
!       if (type == T_VALUETYPE) {
!         if (adr_type->isa_instptr() && !mismatched) {
!           ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
!           int offset = adr_type->is_instptr()->offset();
!           p = ValueTypeNode::make_from_flattened(this, value_klass, base, base, holder, offset, decorators);
!         } else {
!           p = ValueTypeNode::make_from_flattened(this, value_klass, base, adr, NULL, 0, decorators);
!         }
!       } else {
!         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
!       }
        // Normalize the value returned by getBoolean in the following cases
        if (type == T_BOOLEAN &&
            (mismatched ||
             heap_base_oop == top() ||                  // - heap_base_oop is NULL or
             (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
*** 2538,2547 ****
--- 2680,2697 ----
      }
      if (type == T_ADDRESS) {
        p = gvn().transform(new CastP2XNode(NULL, p));
        p = ConvX2UL(p);
      }
+     if (field != NULL && field->is_flattenable() && !field->is_flattened()) {
+       // Load a non-flattened but flattenable value type from memory
+       if (value_type->value_klass()->is_scalarizable()) {
+         p = ValueTypeNode::make_from_oop(this, p, value_type->value_klass());
+       } else {
+         p = null2default(p, value_type->value_klass());
+       }
+     }
      // The load node has the control of the preceding MemBarCPUOrder.  All
      // following nodes will have the control of the MemBarCPUOrder inserted at
      // the end of this method.  So, pushing the load onto the stack at a later
      // point is fine.
      set_result(p);
*** 2549,2564 ****
      if (bt == T_ADDRESS) {
        // Repackage the long as a pointer.
        val = ConvL2X(val);
        val = gvn().transform(new CastX2PNode(val));
      }
!     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
    }

    return true;
  }

  //----------------------------inline_unsafe_load_store----------------------------
  // This method serves a couple of different customers (depending on LoadStoreKind):
  //
  // LS_cmp_swap:
  //
--- 2699,2771 ----
      if (bt == T_ADDRESS) {
        // Repackage the long as a pointer.
        val = ConvL2X(val);
        val = gvn().transform(new CastX2PNode(val));
      }
!     if (type == T_VALUETYPE) {
!       if (adr_type->isa_instptr() && !mismatched) {
!         ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
!         int offset = adr_type->is_instptr()->offset();
!         val->as_ValueType()->store_flattened(this, base, base, holder, offset, decorators);
!       } else {
!         val->as_ValueType()->store_flattened(this, base, adr, NULL, 0, decorators);
!       }
!     } else {
!       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
!     }
!   }
! 
!   if (argument(1)->is_ValueType() && is_store) {
!     Node* value = ValueTypeNode::make_from_oop(this, base, _gvn.type(base)->value_klass());
!     value = value->as_ValueType()->make_larval(this, false);
!     replace_in_map(argument(1), value);
    }

    return true;
  }

+ bool LibraryCallKit::inline_unsafe_make_private_buffer() {
+   Node* receiver = argument(0);
+   Node* value = argument(1);
+ 
+   receiver = null_check(receiver);
+   if (stopped()) {
+     return true;
+   }
+ 
+   if (!value->is_ValueType()) {
+     return false;
+   }
+ 
+   set_result(value->as_ValueType()->make_larval(this, true));
+ 
+   return true;
+ }
+ 
+ bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
+   Node* receiver = argument(0);
+   Node* buffer = argument(1);
+ 
+   receiver = null_check(receiver);
+   if (stopped()) {
+     return true;
+   }
+ 
+   if (!buffer->is_ValueType()) {
+     return false;
+   }
+ 
+   ValueTypeNode* vt = buffer->as_ValueType();
+   if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) {
+     return false;
+   }
+ 
+   set_result(vt->finish_larval(this));
+ 
+   return true;
+ }
+ 
  //----------------------------inline_unsafe_load_store----------------------------
  // This method serves a couple of different customers (depending on LoadStoreKind):
  //
  // LS_cmp_swap:
  //
*** 3087,3105 ****
    C->set_has_split_ifs(true); // Has chance for split-if optimization
    set_result(result_rgn, result_val);
    return true;
  }

- //---------------------------load_mirror_from_klass----------------------------
- // Given a klass oop, load its java mirror (a java.lang.Class oop).
- Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
-   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
-   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
-   // mirror = ((OopHandle)mirror)->resolve();
-   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
- }
- 
  //-----------------------load_klass_from_mirror_common-------------------------
  // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
  // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
  // and branch to the given path on the region.
  // If never_see_null, take an uncommon trap on null, so we can optimistically
--- 3294,3303 ----
*** 3142,3151 ****
--- 3340,3353 ----
  }

  Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
    return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
  }

+ Node* LibraryCallKit::generate_value_guard(Node* kls, RegionNode* region) {
+   return generate_access_flags_guard(kls, JVM_ACC_VALUE, 0, region);
+ }
+ 
  //-------------------------inline_native_Class_query-------------------
  bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
    const Type* return_type = TypeInt::BOOL;
    Node* prim_return_value = top();  // what happens if it's a primitive class?
    bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
*** 3315,3324 ****
--- 3517,3553 ----
    C->set_has_split_ifs(true); // Has chance for split-if optimization
    set_result(region, phi);
    return true;
  }

+ //-------------------------inline_value_Class_conversion-------------------
+ // public Class<T> java.lang.Class.asPrimaryType();
+ // public Class<T> java.lang.Class.asIndirectType()
+ bool LibraryCallKit::inline_value_Class_conversion(vmIntrinsics::ID id) {
+   Node* mirror = argument(0); // Receiver Class
+   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
+   if (mirror_con == NULL) {
+     return false;
+   }
+ 
+   bool is_indirect_type = true;
+   ciType* tm = mirror_con->java_mirror_type(&is_indirect_type);
+   if (tm != NULL) {
+     Node* result = mirror;
+     if (tm->is_valuetype()) {
+       if (id == vmIntrinsics::_asPrimaryType && is_indirect_type) {
+         result = _gvn.makecon(TypeInstPtr::make(tm->as_value_klass()->inline_mirror_instance()));
+       } else if (id == vmIntrinsics::_asIndirectType && !is_indirect_type) {
+         result = _gvn.makecon(TypeInstPtr::make(tm->as_value_klass()->indirect_mirror_instance()));
+       }
+     }
+     set_result(result);
+     return true;
+   }
+   return false;
+ }
+ 
  //-------------------------inline_Class_cast-------------------
  bool LibraryCallKit::inline_Class_cast() {
    Node* mirror = argument(0); // Class
    Node* obj    = argument(1);
    const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
*** 3326,3347 ****
      return false;  // dead path (mirror->is_top()).
    }
    if (obj == NULL || obj->is_top()) {
      return false;  // dead path
    }
!   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();

    // First, see if Class.cast() can be folded statically.
    // java_mirror_type() returns non-null for compile-time Class constants.
!   ciType* tm = mirror_con->java_mirror_type();
!   if (tm != NULL && tm->is_klass() &&
!       tp != NULL && tp->klass() != NULL) {
!     if (!tp->klass()->is_loaded()) {
        // Don't use intrinsic when class is not loaded.
        return false;
      } else {
!       int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
        if (static_res == Compile::SSC_always_true) {
          // isInstance() is true - fold the code.
          set_result(obj);
          return true;
        } else if (static_res == Compile::SSC_always_false) {
--- 3555,3591 ----
      return false;  // dead path (mirror->is_top()).
    }
    if (obj == NULL || obj->is_top()) {
      return false;  // dead path
    }
! 
!   ciKlass* obj_klass = NULL;
!   if (obj->is_ValueType()) {
!     obj_klass = _gvn.type(obj)->value_klass();
!   } else {
!     const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
!     if (tp != NULL) {
!       obj_klass = tp->klass();
!     }
!   }

    // First, see if Class.cast() can be folded statically.
    // java_mirror_type() returns non-null for compile-time Class constants.
!   bool is_indirect_type = true;
!   ciType* tm = mirror_con->java_mirror_type(&is_indirect_type);
!   if (!obj->is_ValueType() && !is_indirect_type) {
!     obj = null_check(obj);
!     if (stopped()) {
!       return true;
!     }
!   }
!   if (tm != NULL && tm->is_klass() && obj_klass != NULL) {
!     if (!obj_klass->is_loaded()) {
        // Don't use intrinsic when class is not loaded.
        return false;
      } else {
!       int static_res = C->static_subtype_check(tm->as_klass(), obj_klass);
        if (static_res == Compile::SSC_always_true) {
          // isInstance() is true - fold the code.
          set_result(obj);
          return true;
        } else if (static_res == Compile::SSC_always_false) {
*** 3367,3394 ****
    if (stopped()) {
      return true;
    }

    // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
!   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
    RegionNode* region = new RegionNode(PATH_LIMIT);
    record_for_igvn(region);

    // Now load the mirror's klass metaobject, and null-check it.
    // If kls is null, we have a primitive mirror and
    // nothing is an instance of a primitive type.
    Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);

    Node* res = top();
    if (!stopped()) {
      Node* bad_type_ctrl = top();
      // Do checkcast optimizations.
      res = gen_checkcast(obj, kls, &bad_type_ctrl);
      region->init_req(_bad_type_path, bad_type_ctrl);
    }
    if (region->in(_prim_path) != top() ||
!       region->in(_bad_type_path) != top()) {
      // Let Interpreter throw ClassCastException.
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(region));
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);
--- 3611,3658 ----
    if (stopped()) {
      return true;
    }

    // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
!   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
    RegionNode* region = new RegionNode(PATH_LIMIT);
    record_for_igvn(region);

    // Now load the mirror's klass metaobject, and null-check it.
    // If kls is null, we have a primitive mirror and
    // nothing is an instance of a primitive type.
    Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);

    Node* res = top();
    if (!stopped()) {
+     if (EnableValhalla && !obj->is_ValueType() && is_indirect_type) {
+       // Check if (mirror == inline_mirror && obj == null)
+       Node* is_val_mirror = generate_fair_guard(is_value_mirror(mirror), NULL);
+       if (is_val_mirror != NULL) {
+         RegionNode* r = new RegionNode(3);
+         record_for_igvn(r);
+         r->init_req(1, control());
+ 
+         // Casting to .val, check for null
+         set_control(is_val_mirror);
+         Node *null_ctr = top();
+         null_check_oop(obj, &null_ctr);
+         region->init_req(_npe_path, null_ctr);
+         r->init_req(2, control());
+ 
+         set_control(_gvn.transform(r));
+       }
+     }
+ 
      Node* bad_type_ctrl = top();
      // Do checkcast optimizations.
      res = gen_checkcast(obj, kls, &bad_type_ctrl);
      region->init_req(_bad_type_path, bad_type_ctrl);
    }
    if (region->in(_prim_path) != top() ||
!       region->in(_bad_type_path) != top() ||
!       region->in(_npe_path) != top()) {
      // Let Interpreter throw ClassCastException.
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(region));
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);
*** 3421,3432 ****
--- 3685,3698 ----
      _both_ref_path,           // {N,N} & subtype check loses => false
      PATH_LIMIT
    };

    RegionNode* region = new RegionNode(PATH_LIMIT);
+   RegionNode* prim_region = new RegionNode(2);
    Node*       phi    = new PhiNode(region, TypeInt::BOOL);
    record_for_igvn(region);
+   record_for_igvn(prim_region);

    const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
    const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
    int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
*** 3451,3484 ****
    bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
    for (which_arg = 0; which_arg <= 1; which_arg++) {
      Node* kls = klasses[which_arg];
      Node* null_ctl = top();
      kls = null_check_oop(kls, &null_ctl, never_see_null);
!     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
!     region->init_req(prim_path, null_ctl);
      if (stopped())  break;
      klasses[which_arg] = kls;
    }

    if (!stopped()) {
      // now we have two reference types, in klasses[0..1]
      Node* subk   = klasses[1];  // the argument to isAssignableFrom
      Node* superk = klasses[0];  // the receiver
      region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
      // now we have a successful reference subtype check
      region->set_req(_ref_subtype_path, control());
    }

    // If both operands are primitive (both klasses null), then
    // we must return true when they are identical primitives.
    // It is convenient to test this after the first null klass check.
!   set_control(region->in(_prim_0_path)); // go back to first null check
    if (!stopped()) {
      // Since superc is primitive, make a guard for the superc==subc case.
      Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
      Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
!     generate_guard(bol_eq, region, PROB_FAIR);
      if (region->req() == PATH_LIMIT+1) {
        // A guard was added.  If the added guard is taken, superc==subc.
        region->swap_edges(PATH_LIMIT, _prim_same_path);
        region->del_req(PATH_LIMIT);
      }
--- 3717,3757 ----
    bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
    for (which_arg = 0; which_arg <= 1; which_arg++) {
      Node* kls = klasses[which_arg];
      Node* null_ctl = top();
      kls = null_check_oop(kls, &null_ctl, never_see_null);
!     if (which_arg == 0) {
!       prim_region->init_req(1, null_ctl);
!     } else {
!       region->init_req(_prim_1_path, null_ctl);
!     }
      if (stopped())  break;
      klasses[which_arg] = kls;
    }

    if (!stopped()) {
      // now we have two reference types, in klasses[0..1]
      Node* subk   = klasses[1];  // the argument to isAssignableFrom
      Node* superk = klasses[0];  // the receiver
      region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
+     // If superc is a value mirror, we also need to check if superc == subc because
+     // V? is not a subtype of V but due to subk == superk the subtype check will pass.
+     generate_fair_guard(is_value_mirror(args[0]), prim_region);
      // now we have a successful reference subtype check
      region->set_req(_ref_subtype_path, control());
    }

    // If both operands are primitive (both klasses null), then
    // we must return true when they are identical primitives.
    // It is convenient to test this after the first null klass check.
!   // This path is also used if superc is a value mirror.
!   set_control(_gvn.transform(prim_region));
    if (!stopped()) {
      // Since superc is primitive, make a guard for the superc==subc case.
      Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
      Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
!     generate_fair_guard(bol_eq, region);
      if (region->req() == PATH_LIMIT+1) {
        // A guard was added.  If the added guard is taken, superc==subc.
        region->swap_edges(PATH_LIMIT, _prim_same_path);
        region->del_req(PATH_LIMIT);
      }
*** 3505,3563 ****
    set_result(_gvn.transform(phi));
    return true;
  }

  //---------------------generate_array_guard_common------------------------
! Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
!                                                   bool obj_array, bool not_array) {

    if (stopped()) {
      return NULL;
    }

-   // If obj_array/non_array==false/false:
-   // Branch around if the given klass is in fact an array (either obj or prim).
-   // If obj_array/non_array==false/true:
-   // Branch around if the given klass is not an array klass of any kind.
-   // If obj_array/non_array==true/true:
-   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
-   // If obj_array/non_array==true/false:
-   // Branch around if the kls is an oop array (Object[] or subtype)
-   //
    // Like generate_guard, adds a new path onto the region.
    jint  layout_con = 0;
    Node* layout_val = get_layout_helper(kls, layout_con);
    if (layout_val == NULL) {
!     bool query = (obj_array
!                   ? Klass::layout_helper_is_objArray(layout_con)
!                   : Klass::layout_helper_is_array(layout_con));
!     if (query == not_array) {
        return NULL;                       // never a branch
      } else {                             // always a branch
        Node* always_branch = control();
        if (region != NULL)
          region->add_req(always_branch);
        set_control(top());
        return always_branch;
      }
    }
    // Now test the correct condition.
!   jint  nval = (obj_array
!                 ? (jint)(Klass::_lh_array_tag_type_value
!                          <<    Klass::_lh_array_tag_shift)
!                 : Klass::_lh_neutral_value);
    Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
-   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
-   // invert the test if we are looking for a non-array
-   if (not_array)  btest = BoolTest(btest).negate();
    Node* bol = _gvn.transform(new BoolNode(cmp, btest));
    return generate_fair_guard(bol, region);
  }

  //-----------------------inline_native_newArray--------------------------
! // private static native Object java.lang.reflect.newArray(Class<?> componentType, int length);
  // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
  bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
    Node* mirror;
    Node* count_val;
    if (uninitialized) {
--- 3778,3855 ----
    set_result(_gvn.transform(phi));
    return true;
  }

  //---------------------generate_array_guard_common------------------------
! Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {

    if (stopped()) {
      return NULL;
    }

    // Like generate_guard, adds a new path onto the region.
    jint  layout_con = 0;
    Node* layout_val = get_layout_helper(kls, layout_con);
    if (layout_val == NULL) {
!     bool query = 0;
!     switch(kind) {
!       case ObjectArray:    query = Klass::layout_helper_is_objArray(layout_con); break;
!       case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
!       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
!       case ValueArray:     query = Klass::layout_helper_is_valueArray(layout_con); break;
!       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
!       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
!       default:
!         ShouldNotReachHere();
!     }
!     if (!query) {
        return NULL;                       // never a branch
      } else {                             // always a branch
        Node* always_branch = control();
        if (region != NULL)
          region->add_req(always_branch);
        set_control(top());
        return always_branch;
      }
    }

+   unsigned int value = 0;
+   BoolTest::mask btest = BoolTest::illegal;
+   switch(kind) {
+     case ObjectArray:
+     case NonObjectArray: {
+       value = Klass::_lh_array_tag_obj_value;
+       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+       btest = kind == ObjectArray ? BoolTest::eq : BoolTest::ne;
+       break;
+     }
+     case TypeArray: {
+       value = Klass::_lh_array_tag_type_value;
+       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+       btest = BoolTest::eq;
+       break;
+     }
+     case ValueArray: {
+       value = Klass::_lh_array_tag_vt_value;
+       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+       btest = BoolTest::eq;
+       break;
+     }
+     case AnyArray:  value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
+     case NonArray:  value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
+     default:
+       ShouldNotReachHere();
+   }

    // Now test the correct condition.
!   jint nval = (jint)value;
    Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
    Node* bol = _gvn.transform(new BoolNode(cmp, btest));
    return generate_fair_guard(bol, region);
  }

  //-----------------------inline_native_newArray--------------------------
! // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
  // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
  bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
    Node* mirror;
    Node* count_val;
    if (uninitialized) {
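For orientation, the guard built above boils down to a simple scalar predicate on the klass layout helper: the array kinds extract a tag from the high bits with an arithmetic right shift and compare it for (in)equality, while AnyArray/NonArray compare the whole word against the neutral value (array layout helpers are negative). A minimal scalar model, illustrative only; the tag and shift parameters stand in for the Klass::_lh_* constants named in the hunk:

  #include <cstdint>

  // Scalar model of the emitted IR (CmpI/Bool after an optional RShiftI).
  bool is_value_array(int32_t layout_helper, int tag_shift, int32_t vt_tag) {
    int32_t tag = layout_helper >> tag_shift;   // arithmetic shift, like RShiftINode
    return tag == vt_tag;                       // BoolTest::eq vs _lh_array_tag_vt_value
  }

  bool is_any_array(int32_t layout_helper, int32_t neutral) {
    return layout_helper < neutral;             // BoolTest::lt vs _lh_neutral_value
  }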
*** 3604,3614 ****
    set_control(normal_ctl);

    if (!stopped()) {
      // Normal case:  The array type has been cached in the java.lang.Class.
      // The following call works fine even if the array type is polymorphic.
      // It could be a dynamic mix of int[], boolean[], Object[], etc.
!     Node* obj = new_array(klass_node, count_val, 0);  // no arguments to push
      result_reg->init_req(_normal_path, control());
      result_val->init_req(_normal_path, obj);
      result_io ->init_req(_normal_path, i_o());
      result_mem->init_req(_normal_path, reset_memory());
--- 3896,3906 ----
    set_control(normal_ctl);

    if (!stopped()) {
      // Normal case:  The array type has been cached in the java.lang.Class.
      // The following call works fine even if the array type is polymorphic.
      // It could be a dynamic mix of int[], boolean[], Object[], etc.
!     Node* obj = new_array(klass_node, count_val, 0, NULL, false, mirror);  // no arguments to push
      result_reg->init_req(_normal_path, control());
      result_val->init_req(_normal_path, obj);
      result_io ->init_req(_normal_path, i_o());
      result_mem->init_req(_normal_path, reset_memory());
*** 3669,3678 ****
--- 3961,3983 ----
    Node* original          = argument(0);
    Node* start             = is_copyOfRange? argument(1): intcon(0);
    Node* end               = is_copyOfRange? argument(2): argument(1);
    Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);

+   const TypeAryPtr* original_t = _gvn.type(original)->isa_aryptr();
+   const TypeInstPtr* mirror_t = _gvn.type(array_type_mirror)->isa_instptr();
+   if (EnableValhalla && ValueArrayFlatten &&
+       (original_t == NULL || mirror_t == NULL ||
+        (mirror_t->java_mirror_type() == NULL &&
+         (original_t->elem()->isa_valuetype() ||
+          (original_t->elem()->make_oopptr() != NULL &&
+           original_t->elem()->make_oopptr()->can_be_value_type()))))) {
+     // We need to know statically if the copy is to a flattened array
+     // or not but can't tell.
+     return false;
+   }
+ 
    Node* newcopy = NULL;

    // Set the original stack and the reexecute bit for the interpreter to reexecute
    // the bytecode that invokes Arrays.copyOf if deoptimization happens.
    { PreserveReexecuteState preexecs(this);
*** 3692,3711 ****
      RegionNode* bailout = new RegionNode(1);
      record_for_igvn(bailout);

      // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
      // Bail out if that is so.
!     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
      if (not_objArray != NULL) {
        // Improve the klass node's type from the new optimistic assumption:
        ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
!       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
        Node* cast = new CastPPNode(klass_node, akls);
        cast->init_req(0, control());
        klass_node = _gvn.transform(cast);
      }

      // Bail out if either start or end is negative.
      generate_negative_guard(start, bailout, &start);
      generate_negative_guard(end,   bailout, &end);

      Node* length = end;
--- 3997,4062 ----
      RegionNode* bailout = new RegionNode(1);
      record_for_igvn(bailout);

      // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
      // Bail out if that is so.
!     // Value type array may have object field that would require a
!     // write barrier. Conservatively, go to slow path.
!     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
!     Node* not_objArray = !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing) ?
!       generate_typeArray_guard(klass_node, bailout) : generate_non_objArray_guard(klass_node, bailout);
      if (not_objArray != NULL) {
        // Improve the klass node's type from the new optimistic assumption:
        ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
!       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
        Node* cast = new CastPPNode(klass_node, akls);
        cast->init_req(0, control());
        klass_node = _gvn.transform(cast);
      }

+     Node* original_kls = load_object_klass(original);
+     // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
+     // loads/stores but it is legal only if we're sure the
+     // Arrays.copyOf would succeed. So we need all input arguments
+     // to the copyOf to be validated, including that the copy to the
+     // new array won't trigger an ArrayStoreException. That subtype
+     // check can be optimized if we know something on the type of
+     // the input array from type speculation.
+     if (_gvn.type(klass_node)->singleton() && !stopped()) {
+       ciKlass* subk   = _gvn.type(original_kls)->is_klassptr()->klass();
+       ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
+ 
+       int test = C->static_subtype_check(superk, subk);
+       if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
+         const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
+         if (t_original->speculative_type() != NULL) {
+           original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
+           original_kls = load_object_klass(original);
+         }
+       }
+     }
+ 
+     if (ValueArrayFlatten) {
+       // Either both or neither new array klass and original array
+       // klass must be flattened
+       Node* is_flat = generate_valueArray_guard(klass_node, NULL);
+       if (!original_t->is_not_flat()) {
+         generate_valueArray_guard(original_kls, bailout);
+       }
+       if (is_flat != NULL) {
+         RegionNode* r = new RegionNode(2);
+         record_for_igvn(r);
+         r->init_req(1, control());
+         set_control(is_flat);
+         if (!original_t->is_not_flat()) {
+           generate_valueArray_guard(original_kls, r);
+         }
+         bailout->add_req(control());
+         set_control(_gvn.transform(r));
+       }
+     }
+ 
      // Bail out if either start or end is negative.
      generate_negative_guard(start, bailout, &start);
      generate_negative_guard(end,   bailout, &end);

      Node* length = end;
*** 3738,3772 ****
      // We know the copy is disjoint but we might not know if the
      // oop stores need checking.
      // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
      // This will fail a store-check if x contains any non-nulls.

-     // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
-     // loads/stores but it is legal only if we're sure the
-     // Arrays.copyOf would succeed. So we need all input arguments
-     // to the copyOf to be validated, including that the copy to the
-     // new array won't trigger an ArrayStoreException. That subtype
-     // check can be optimized if we know something on the type of
-     // the input array from type speculation.
-     if (_gvn.type(klass_node)->singleton()) {
-       ciKlass* subk   = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
-       ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
- 
-       int test = C->static_subtype_check(superk, subk);
-       if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
-         const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
-         if (t_original->speculative_type() != NULL) {
-           original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
-         }
-       }
-     }
- 
      bool validated = false;
      // Reason_class_check rather than Reason_intrinsic because we
      // want to intrinsify even if this traps.
      if (!too_many_traps(Deoptimization::Reason_class_check)) {
!       Node* not_subtype_ctrl = gen_subtype_check(load_object_klass(original),
!                                                  klass_node);

        if (not_subtype_ctrl != top()) {
          PreserveJVMState pjvms(this);
          set_control(not_subtype_ctrl);
--- 4089,4103 ----
      // We know the copy is disjoint but we might not know if the
      // oop stores need checking.
      // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
      // This will fail a store-check if x contains any non-nulls.

      bool validated = false;
      // Reason_class_check rather than Reason_intrinsic because we
      // want to intrinsify even if this traps.
      if (!too_many_traps(Deoptimization::Reason_class_check)) {
!       Node* not_subtype_ctrl = gen_subtype_check(original_kls, klass_node);

        if (not_subtype_ctrl != top()) {
          PreserveJVMState pjvms(this);
          set_control(not_subtype_ctrl);
*** 3776,3789 ****
        }
        validated = true;
      }

      if (!stopped()) {
!       newcopy = new_array(klass_node, length, 0);  // no arguments to push

        ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
!                                               load_object_klass(original), klass_node);
        if (!is_copyOfRange) {
          ac->set_copyof(validated);
        } else {
          ac->set_copyofrange(validated);
        }
--- 4107,4124 ----
        }
        validated = true;
      }

      if (!stopped()) {
!       // Load element mirror
!       Node* p = basic_plus_adr(array_type_mirror, java_lang_Class::component_mirror_offset_in_bytes());
!       Node* elem_mirror = access_load_at(array_type_mirror, p, _gvn.type(p)->is_ptr(), TypeInstPtr::MIRROR, T_OBJECT, IN_HEAP);
! 
!       newcopy = new_array(klass_node, length, 0, NULL, false, elem_mirror);

        ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
!                                               original_kls, klass_node);
        if (!is_copyOfRange) {
          ac->set_copyof(validated);
        } else {
          ac->set_copyofrange(validated);
        }
*** 3903,3923 ****
    RegionNode* result_reg = new RegionNode(PATH_LIMIT);
    PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
    PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
    PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);

!   Node* obj = NULL;
    if (!is_static) {
      // Check for hashing null object
      obj = null_check_receiver();
      if (stopped())  return true;        // unconditionally null
      result_reg->init_req(_null_path, top());
      result_val->init_req(_null_path, top());
    } else {
      // Do a null check, and return zero if null.
      // System.identityHashCode(null) == 0
-     obj = argument(0);
      Node* null_ctl = top();
      obj = null_check_oop(obj, &null_ctl);
      result_reg->init_req(_null_path, null_ctl);
      result_val->init_req(_null_path, _gvn.intcon(0));
    }
--- 4238,4262 ----
    RegionNode* result_reg = new RegionNode(PATH_LIMIT);
    PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
    PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
    PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);

!   Node* obj = argument(0);
! 
!   if (obj->is_ValueType() || gvn().type(obj)->is_valuetypeptr()) {
!     return false;
!   }
! 
    if (!is_static) {
      // Check for hashing null object
      obj = null_check_receiver();
      if (stopped())  return true;        // unconditionally null
      result_reg->init_req(_null_path, top());
      result_val->init_req(_null_path, top());
    } else {
      // Do a null check, and return zero if null.
      // System.identityHashCode(null) == 0
      Node* null_ctl = top();
      obj = null_check_oop(obj, &null_ctl);
      result_reg->init_req(_null_path, null_ctl);
      result_val->init_req(_null_path, _gvn.intcon(0));
    }
*** 3933,3942 ****
--- 4272,4288 ----
    // We only go to the fast case code if we pass a number of guards.  The
    // paths which do not pass are accumulated in the slow_region.
    RegionNode* slow_region = new RegionNode(1);
    record_for_igvn(slow_region);

+   const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+   assert(!obj_type->isa_valuetype() || !obj_type->is_valuetypeptr(), "no value type here");
+   if (is_static && obj_type->can_be_value_type()) {
+     Node* obj_klass = load_object_klass(obj);
+     generate_value_guard(obj_klass, slow_region);
+   }
+ 
    // If this is a virtual call, we generate a funny guard.  We pull out
    // the vtable entry corresponding to hashCode() from the target object.
    // If the target method which we are calling happens to be the native
    // Object hashCode() method, we pass the guard.  We do not need this
    // guard for non-virtual calls -- the caller is known to be the native
*** 4019,4029 ****
  //---------------------------inline_native_getClass----------------------------
  // public final native Class<?> java.lang.Object.getClass();
  //
  // Build special case code for calls to getClass on an object.
  bool LibraryCallKit::inline_native_getClass() {
!   Node* obj = null_check_receiver();
    if (stopped())  return true;
    set_result(load_mirror_from_klass(load_object_klass(obj)));
    return true;
  }
--- 4365,4381 ----
  //---------------------------inline_native_getClass----------------------------
  // public final native Class<?> java.lang.Object.getClass();
  //
  // Build special case code for calls to getClass on an object.
  bool LibraryCallKit::inline_native_getClass() {
!   Node* obj = argument(0);
!   if (obj->is_ValueType()) {
!     ciKlass* vk = _gvn.type(obj)->value_klass();
!     set_result(makecon(TypeInstPtr::make(vk->java_mirror())));
!     return true;
!   }
!   obj = null_check_receiver();
    if (stopped())  return true;
    set_result(load_mirror_from_klass(load_object_klass(obj)));
    return true;
  }
*** 4276,4286 ****
    // Copy the fastest available way.
    // TODO: generate fields copies for small objects instead.
    Node* size = _gvn.transform(obj_size);

!   access_clone(obj, alloc_obj, size, is_array);

    // Do not let reads from the cloned object float above the arraycopy.
    if (alloc != NULL) {
      // Do not let stores that initialize this object be reordered with
      // a subsequent store that would make this object accessible by
--- 4628,4665 ----
    // Copy the fastest available way.
    // TODO: generate fields copies for small objects instead.
    Node* size = _gvn.transform(obj_size);

!   // Exclude the header but include array length to copy by 8 bytes words.
!   // Can't use base_offset_in_bytes(bt) since basic type is unknown.
!   int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
!                             instanceOopDesc::base_offset_in_bytes();
!   // base_off:
!   // 8  - 32-bit VM
!   // 12 - 64-bit VM, compressed klass
!   // 16 - 64-bit VM, normal klass
!   if (base_off % BytesPerLong != 0) {
!     assert(UseCompressedClassPointers, "");
!     if (is_array) {
!       // Exclude length to copy by 8 bytes words.
!       base_off += sizeof(int);
!     } else {
!       // Include klass to copy by 8 bytes words.
!       base_off = instanceOopDesc::klass_offset_in_bytes();
!     }
!     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
!   }
!   Node* src_base = basic_plus_adr(obj,  base_off);
!   Node* dst_base = basic_plus_adr(alloc_obj, base_off);
! 
!   // Compute the length also, if needed:
!   Node* countx = size;
!   countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
!   countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));
! 
!   access_clone(src_base, dst_base, countx, is_array);

    // Do not let reads from the cloned object float above the arraycopy.
    if (alloc != NULL) {
      // Do not let stores that initialize this object be reordered with
      // a subsequent store that would make this object accessible by
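To sanity-check the base_off arithmetic above, here is a standalone sketch with the offsets hard-coded for the 64-bit, compressed-class-pointer case called out in the patch comment (12-byte header, klass word at offset 8); the names and the example are illustrative, not VM code:

  #include <cassert>

  struct Window { int base_off; long words; };

  // Mirrors copy_to_clone's copy-window computation for one VM configuration.
  Window clone_window(bool is_array, long size_in_bytes) {
    int base_off = 12;                    // array length offset / instance base offset
    if (base_off % 8 != 0) {              // not 8-byte aligned:
      base_off = is_array ? base_off + 4  //   arrays: exclude the length field (-> 16)
                          : 8;            //   instances: include the klass word (-> 8)
    }
    assert(base_off % 8 == 0);
    return { base_off, (size_in_bytes - base_off) >> 3 };  // count of 8-byte words
  }
  // Example: a 32-byte instance copies (32 - 8) / 8 = 3 words starting at the klass word.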
*** 4319,4339 ****
    // Set the reexecute bit for the interpreter to reexecute
    // the bytecode that invokes Object.clone if deoptimization happens.
    { PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);

!     Node* obj = null_check_receiver();
      if (stopped())  return true;

      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();

      // If we are going to clone an instance, we need its exact type to
      // know the number and types of fields to convert the clone to
      // loads/stores. Maybe a speculative type can help us.
      if (!obj_type->klass_is_exact() &&
          obj_type->speculative_type() != NULL &&
!         obj_type->speculative_type()->is_instance_klass()) {
        ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
        if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
            !spec_ik->has_injected_fields()) {
          ciKlass* k = obj_type->klass();
          if (!k->is_instance_klass() ||
--- 4698,4724 ----
    // Set the reexecute bit for the interpreter to reexecute
    // the bytecode that invokes Object.clone if deoptimization happens.
    { PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);

!     Node* obj = argument(0);
!     if (obj->is_ValueType()) {
!       return false;
!     }
! 
!     obj = null_check_receiver();
      if (stopped())  return true;

      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();

      // If we are going to clone an instance, we need its exact type to
      // know the number and types of fields to convert the clone to
      // loads/stores. Maybe a speculative type can help us.
      if (!obj_type->klass_is_exact() &&
          obj_type->speculative_type() != NULL &&
!         obj_type->speculative_type()->is_instance_klass() &&
!         !obj_type->speculative_type()->is_valuetype()) {
        ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
        if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
            !spec_ik->has_injected_fields()) {
          ciKlass* k = obj_type->klass();
          if (!k->is_instance_klass() ||
*** 4343,4356 ****
        }
      }
    }

    Node* obj_klass = load_object_klass(obj);
-   const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
-   const TypeOopPtr*   toop   = ((tklass != NULL)
-                                ? tklass->as_instance_type()
-                                : TypeInstPtr::NOTNULL);

    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the original float below the clone.
    insert_mem_bar(Op_MemBarCPUOrder);
--- 4728,4737 ----
*** 4366,4430 ****
    result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
    PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
    PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
    record_for_igvn(result_reg);
  
    Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
    if (array_ctl != NULL) {
      // It's an array.
      PreserveJVMState pjvms(this);
      set_control(array_ctl);
-     Node* obj_length = load_array_length(obj);
-     Node* obj_size  = NULL;
-     Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push
  
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
!     if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) {
!       // If it is an oop array, it requires very special treatment,
!       // because gc barriers are required when accessing the array.
!       Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
!       if (is_obja != NULL) {
!         PreserveJVMState pjvms2(this);
!         set_control(is_obja);
!         obj = access_resolve(obj, ACCESS_READ);
!         // Generate a direct call to the right arraycopy function(s).
!         Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
!         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false);
!         ac->set_cloneoop();
!         Node* n = _gvn.transform(ac);
!         assert(n == ac, "cannot disappear");
!         ac->connect_outputs(this);
!
!         result_reg->init_req(_objArray_path, control());
!         result_val->init_req(_objArray_path, alloc_obj);
!         result_i_o ->set_req(_objArray_path, i_o());
!         result_mem ->set_req(_objArray_path, reset_memory());
!       }
      }
-     // Otherwise, there are no barriers to worry about.
-     // (We can dispense with card marks if we know the allocation
-     //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
-     //  causes the non-eden paths to take compensating steps to
-     //  simulate a fresh allocation, so that no further
-     //  card marks are required in compiled code to initialize
-     //  the object.)
  
      if (!stopped()) {
!       copy_to_clone(obj, alloc_obj, obj_size, true);
!       // Present the results of the copy.
!       result_reg->init_req(_array_path, control());
!       result_val->init_req(_array_path, alloc_obj);
!       result_i_o ->set_req(_array_path, i_o());
!       result_mem ->set_req(_array_path, reset_memory());
      }
    }
  
-   // We only go to the instance fast case code if we pass a number of guards.
-   // The paths which do not pass are accumulated in the slow_region.
-   RegionNode* slow_region = new RegionNode(1);
-   record_for_igvn(slow_region);
  
    if (!stopped()) {
      // It's an instance (we did array above).  Make the slow-path tests.
      // If this is a virtual call, we generate a funny guard.  We grab
      // the vtable entry corresponding to clone() from the target object.
      // If the target method which we are calling happens to be the
--- 4747,4828 ----
    result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
    PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
    PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
    record_for_igvn(result_reg);
  
+   // We only go to the fast case code if we pass a number of guards.
+   // The paths which do not pass are accumulated in the slow_region.
+   RegionNode* slow_region = new RegionNode(1);
+   record_for_igvn(slow_region);
+ 
    Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
    if (array_ctl != NULL) {
      // It's an array.
      PreserveJVMState pjvms(this);
      set_control(array_ctl);
  
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
!     if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing) &&
!         (!obj_type->isa_aryptr() || !obj_type->is_aryptr()->is_not_flat())) {
!       // A flattened value type array may have object fields that would require
!       // a write barrier. Conservatively, go to the slow path.
!       generate_valueArray_guard(obj_klass, slow_region);
      }
  
      if (!stopped()) {
!       Node* obj_length = load_array_length(obj);
!       Node* obj_size  = NULL;
!       // Load element mirror
!       Node* array_type_mirror = load_mirror_from_klass(obj_klass);
!       Node* p = basic_plus_adr(array_type_mirror, java_lang_Class::component_mirror_offset_in_bytes());
!       Node* elem_mirror = access_load_at(array_type_mirror, p, _gvn.type(p)->is_ptr(), TypeInstPtr::MIRROR, T_OBJECT, IN_HEAP);
!
!       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, false, elem_mirror);
!
!       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
!       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) {
!         // If it is an oop array, it requires very special treatment,
!         // because gc barriers are required when accessing the array.
!         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
!         if (is_obja != NULL) {
!           PreserveJVMState pjvms2(this);
!           set_control(is_obja);
!           // Generate a direct call to the right arraycopy function(s).
!           Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
!           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false);
!           ac->set_cloneoop();
!           Node* n = _gvn.transform(ac);
!           assert(n == ac, "cannot disappear");
!           ac->connect_outputs(this);
!
!           result_reg->init_req(_objArray_path, control());
!           result_val->init_req(_objArray_path, alloc_obj);
!           result_i_o ->set_req(_objArray_path, i_o());
!           result_mem ->set_req(_objArray_path, reset_memory());
!         }
!       }
!       // Otherwise, there are no barriers to worry about.
!       // (We can dispense with card marks if we know the allocation
!       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
!       //  causes the non-eden paths to take compensating steps to
!       //  simulate a fresh allocation, so that no further
!       //  card marks are required in compiled code to initialize
!       //  the object.)
!
!       if (!stopped()) {
!         copy_to_clone(obj, alloc_obj, obj_size, true);
!
!         // Present the results of the copy.
!         result_reg->init_req(_array_path, control());
!         result_val->init_req(_array_path, alloc_obj);
!         result_i_o ->set_req(_array_path, i_o());
!         result_mem ->set_req(_array_path, reset_memory());
!       }
      }
    }
  
    if (!stopped()) {
      // It's an instance (we did array above).  Make the slow-path tests.
      // If this is a virtual call, we generate a funny guard.  We grab
      // the vtable entry corresponding to clone() from the target object.
      // If the target method which we are calling happens to be the
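Stripped of the IR plumbing, the new hunk encodes a four-way dispatch for clone(): flattened value type arrays fall to the slow path when barriers matter, oop arrays take the ArrayCopyNode path, other arrays get a raw block copy, and non-arrays fall through to the instance guards. A standalone C++ sketch of that decision, under the editor's simplification that each guard reduces to a boolean; none of these names are HotSpot API:

    enum ClonePath { InstancePath, SlowPath, ObjArrayPath, ArrayPath };

    ClonePath select_clone_path(bool is_array, bool may_be_flat_value_array,
                                bool is_obj_array, bool copy_needs_gc_barriers) {
      if (!is_array)
        return InstancePath;   // instance-clone guards are generated later
      if (copy_needs_gc_barriers && may_be_flat_value_array)
        return SlowPath;       // flattened arrays may carry oop fields
      if (copy_needs_gc_barriers && is_obj_array)
        return ObjArrayPath;   // direct arraycopy with GC barriers
      return ArrayPath;        // raw block copy via copy_to_clone
    }

The element-mirror load feeding new_array is the IR counterpart of obj.getClass().getComponentType(); java_lang_Class::component_mirror_offset_in_bytes() addresses the field that backs that Java-level call.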
*** 4581,4595 ****
    map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
    set_jvms(saved_jvms);
    _reexecute_sp = saved_reexecute_sp;
  
    // Remove the allocation from above the guards
!   CallProjections callprojs;
!   alloc->extract_projections(&callprojs, true);
    InitializeNode* init = alloc->initialization();
    Node* alloc_mem = alloc->in(TypeFunc::Memory);
!   C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
    C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
    C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
  
    // move the allocation here (after the guards)
    _gvn.hash_delete(alloc);
--- 4979,4992 ----
    map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
    set_jvms(saved_jvms);
    _reexecute_sp = saved_reexecute_sp;
  
    // Remove the allocation from above the guards
!   CallProjections* callprojs = alloc->extract_projections(true);
    InitializeNode* init = alloc->initialization();
    Node* alloc_mem = alloc->in(TypeFunc::Memory);
!   C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
    C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
    C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
  
    // move the allocation here (after the guards)
    _gvn.hash_delete(alloc);
*** 4597,4607 ****
    alloc->set_req(TypeFunc::I_O, i_o());
    Node *mem = reset_memory();
    set_all_memory(mem);
    alloc->set_req(TypeFunc::Memory, mem);
    set_control(init->proj_out_or_null(TypeFunc::Control));
!   set_i_o(callprojs.fallthrough_ioproj);
  
    // Update memory as done in GraphKit::set_output_for_allocation()
    const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
    const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
    if (ary_type->isa_aryptr() && length_type != NULL) {
--- 4994,5004 ----
    alloc->set_req(TypeFunc::I_O, i_o());
    Node *mem = reset_memory();
    set_all_memory(mem);
    alloc->set_req(TypeFunc::Memory, mem);
    set_control(init->proj_out_or_null(TypeFunc::Control));
!   set_i_o(callprojs->fallthrough_ioproj);
  
    // Update memory as done in GraphKit::set_output_for_allocation()
    const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
    const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
    if (ary_type->isa_aryptr() && length_type != NULL) {
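Both hunks track one signature change: extract_projections used to fill a caller-supplied CallProjections struct and now allocates and returns one. A plausible motive, hedged because it lies outside this diff, is that calls returning value types no longer have a fixed number of result projections, so only the callee knows how large the record must be. A standalone C++ sketch of that API shape with illustrative names (not HotSpot code):

    #include <vector>

    struct ProjNode {};

    struct CallProjectionsSketch {
      ProjNode* fallthrough_ioproj = nullptr;
      std::vector<ProjNode*> resproj;   // variable-length result projections
    };

    // The callee sizes and returns the struct, so callers need not know the
    // projection count up front; call sites just switch from '.' to '->'.
    CallProjectionsSketch* extract_projections_sketch(int result_count) {
      auto* p = new CallProjectionsSketch();
      p->resproj.resize(result_count);
      return p;
    }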
*** 4841,4861 ****
        set_control(not_subtype_ctrl);
        uncommon_trap(Deoptimization::Reason_intrinsic,
                      Deoptimization::Action_make_not_entrant);
        assert(stopped(), "Should be stopped");
      }
      {
        PreserveJVMState pjvms(this);
        set_control(_gvn.transform(slow_region));
        uncommon_trap(Deoptimization::Reason_intrinsic,
                      Deoptimization::Action_make_not_entrant);
        assert(stopped(), "Should be stopped");
      }
-
-     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
-     const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
-     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
    }
  
    arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
  
    if (stopped()) {
--- 5238,5269 ----
        set_control(not_subtype_ctrl);
        uncommon_trap(Deoptimization::Reason_intrinsic,
                      Deoptimization::Action_make_not_entrant);
        assert(stopped(), "Should be stopped");
      }
+
+     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
+     const Type* toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
+     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
+     src_type = _gvn.type(src);
+     top_src = src_type->isa_aryptr();
+
+     if (top_dest != NULL && !top_dest->elem()->isa_valuetype() && !top_dest->is_not_flat()) {
+       generate_valueArray_guard(dest_klass, slow_region);
+     }
+
+     if (top_src != NULL && !top_src->elem()->isa_valuetype() && !top_src->is_not_flat()) {
+       generate_valueArray_guard(src_klass, slow_region);
+     }
+
      {
        PreserveJVMState pjvms(this);
        set_control(_gvn.transform(slow_region));
        uncommon_trap(Deoptimization::Reason_intrinsic,
                      Deoptimization::Action_make_not_entrant);
        assert(stopped(), "Should be stopped");
      }
    }
  
    arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
  
    if (stopped()) {
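The two added guards fire only when the static type of an arraycopy operand leaves the flattened case open. A standalone C++ sketch of that predicate, mirroring the top_src/top_dest conditions above; the names are illustrative, not HotSpot API:

    // A runtime flat-array guard is needed only when the operand is known
    // to be an array, its element is not statically a value type (that
    // case is handled elsewhere), and flatness cannot be ruled out.
    bool needs_flat_array_guard(bool is_array_type,
                                bool elem_is_static_value_type,
                                bool statically_not_flat) {
      return is_array_type && !elem_is_static_value_type && !statically_not_flat;
    }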