
src/hotspot/share/c1/c1_LIRGenerator.cpp

rev 64191 : Merge

*** 1321,1330 **** --- 1321,1478 ----
    // threadObj = ((OopHandle)_threadObj)->resolve();
    access_load(IN_NATIVE, T_OBJECT,
                LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), reg);
  }
  
+ void LIRGenerator::do_getReferencedObjects(Intrinsic* x) {
+   BasicTypeList signature;
+   signature.append(T_OBJECT); // obj
+   signature.append(T_ARRAY);  // reference buffer
+ 
+   LIRItem a0(x->argument_at(0), this);
+   LIRItem a1(x->argument_at(1), this);
+   a0.load_item();
+   a1.load_item();
+ 
+   LIR_OprList* args = new LIR_OprList();
+   args->append(a0.result());
+   args->append(a1.result());
+ 
+   LIR_Opr result = call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::get_referenced_objects), intType, NULL);
+   __ move(result, rlock_result(x), NULL);
+ }
+ 
+ void LIRGenerator::do_addressOf(Intrinsic* x) {
+   assert(x->number_of_arguments() == 1, "wrong type");
+   LIR_Opr reg = rlock_result(x);
+ 
+   if (!RuntimeAddressOf) {
+     __ move(LIR_OprFact::longConst(-1), reg, NULL);
+     return;
+   }
+ 
+   LIRItem value(x->argument_at(0), this);
+   value.load_item();
+ 
+ #ifdef _LP64
+   __ move(value.result(), reg, NULL);
+   __ add(reg, LIR_OprFact::intptrConst(Universe::non_heap_offset()), reg);
+ #else
+   LIR_Opr res = new_register(T_INT);
+   __ move(value.result(), res, NULL);
+   __ add(res, LIR_OprFact::intptrConst(Universe::non_heap_offset()), res);
+   __ convert(Bytecodes::_i2l, res, reg);
+ #endif
+ }
+ 
+ void LIRGenerator::do_sizeOf(Intrinsic* x) {
+   assert(x->number_of_arguments() == 1, "wrong type");
+   LIR_Opr result_reg = rlock_result(x);
+ 
+   if (!RuntimeSizeOf) {
+     __ move(LIR_OprFact::longConst(-1), result_reg);
+     return;
+   }
+ 
+   LIRItem value(x->argument_at(0), this);
+   value.load_item();
+ 
+   LIR_Opr klass = new_register(T_METADATA);
+   __ move(new LIR_Address(value.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, NULL);
+   LIR_Opr layout = new_register(T_INT);
+   __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
+ 
+   LabelObj* L_done = new LabelObj();
+   LabelObj* L_array = new LabelObj();
+ 
+   __ cmp(lir_cond_lessEqual, layout, 0);
+   __ branch(lir_cond_lessEqual, L_array->label());
+ 
+   // Instance case: the layout helper gives us instance size almost directly,
+   // but we need to mask out the _lh_instance_slow_path_bit.
+   __ convert(Bytecodes::_i2l, layout, result_reg);
+ 
+   assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
+   jlong mask = ~(jlong) right_n_bits(LogBytesPerLong);
+   __ logical_and(result_reg, LIR_OprFact::longConst(mask), result_reg);
+ 
+   __ branch(lir_cond_always, L_done->label());
+ 
+   // Array case: size is round(header + element_size*arraylength).
+   // Since arraylength is different for every array instance, we have to
+   // compute the whole thing at runtime.
+ 
+   __ branch_destination(L_array->label());
+ 
+   int round_mask = MinObjAlignmentInBytes - 1;
+ 
+   // Figure out header sizes first.
+   LIR_Opr hss = LIR_OprFact::intConst(Klass::_lh_header_size_shift);
+   LIR_Opr hsm = LIR_OprFact::intConst(Klass::_lh_header_size_mask);
+ 
+   LIR_Opr header_size = new_register(T_INT);
+   __ move(layout, header_size);
+   LIR_Opr tmp = new_register(T_INT);
+   __ unsigned_shift_right(header_size, hss, header_size, tmp);
+   __ logical_and(header_size, hsm, header_size);
+   __ add(header_size, LIR_OprFact::intConst(round_mask), header_size);
+ 
+   // Figure out the array length in bytes
+   assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
+   LIR_Opr l2esm = LIR_OprFact::intConst(Klass::_lh_log2_element_size_mask);
+   __ logical_and(layout, l2esm, layout);
+ 
+   LIR_Opr length_int = new_register(T_INT);
+   __ move(new LIR_Address(value.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), length_int);
+ 
+ #ifdef _LP64
+   LIR_Opr length = new_register(T_LONG);
+   __ convert(Bytecodes::_i2l, length_int, length);
+ #endif
+ 
+   // Shift-left awkwardness. Normally it is just:
+   //   __ shift_left(length, layout, length);
+   // But C1 cannot perform shift_left with non-constant count, so we end up
+   // doing the per-bit loop dance here. x86_32 also does not know how to shift
+   // longs, so we have to act on ints.
+   LabelObj* L_shift_loop = new LabelObj();
+   LabelObj* L_shift_exit = new LabelObj();
+ 
+   __ branch_destination(L_shift_loop->label());
+   __ cmp(lir_cond_equal, layout, 0);
+   __ branch(lir_cond_equal, L_shift_exit->label());
+ 
+ #ifdef _LP64
+   __ shift_left(length, 1, length);
+ #else
+   __ shift_left(length_int, 1, length_int);
+ #endif
+ 
+   __ sub(layout, LIR_OprFact::intConst(1), layout);
+ 
+   __ branch(lir_cond_always, L_shift_loop->label());
+   __ branch_destination(L_shift_exit->label());
+ 
+   // Mix all up, round, and push to the result.
+ #ifdef _LP64
+   LIR_Opr header_size_long = new_register(T_LONG);
+   __ convert(Bytecodes::_i2l, header_size, header_size_long);
+   __ add(length, header_size_long, length);
+   if (round_mask != 0) {
+     __ logical_and(length, LIR_OprFact::longConst(~round_mask), length);
+   }
+   __ move(length, result_reg);
+ #else
+   __ add(length_int, header_size, length_int);
+   if (round_mask != 0) {
+     __ logical_and(length_int, LIR_OprFact::intConst(~round_mask), length_int);
+   }
+   __ convert(Bytecodes::_i2l, length_int, result_reg);
+ #endif
+ 
+   __ branch_destination(L_done->label());
+ }
  
  void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
    assert(x->number_of_arguments() == 1, "wrong type");
    LIRItem receiver(x->argument_at(0), this);
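
The do_sizeOf() LIR above is easiest to check against a scalar version of the same computation. Below is a minimal C++ sketch (not part of the patch) assuming the usual Klass layout-helper encoding and 8-byte object alignment; the constant values and the name size_of_sketch are illustrative only, and the real code reads the shifts and masks from Klass:

    #include <cstdint>

    // Assumed layout-helper encoding (see Klass): positive values are instance
    // sizes in bytes (low bit = slow-path flag); negative values pack the array
    // header size and log2(element size).
    const int kHeaderSizeShift    = 8;    // Klass::_lh_header_size_shift
    const int kHeaderSizeMask     = 0xFF; // Klass::_lh_header_size_mask
    const int kLog2ElemSizeMask   = 63;   // Klass::_lh_log2_element_size_mask
    const int kMinObjAlignInBytes = 8;    // MinObjAlignmentInBytes (assumed)

    int64_t size_of_sketch(int32_t layout_helper, int32_t array_length) {
      const int round_mask = kMinObjAlignInBytes - 1;
      if (layout_helper > 0) {
        // Instance case: the helper already holds the size; clear the low bits
        // to drop the slow-path flag, as the LIR does with
        // ~right_n_bits(LogBytesPerLong).
        return (int64_t)layout_helper & ~(int64_t)7;
      }
      // Array case: unpack header size and log2(element size) from the helper.
      int32_t header  = (layout_helper >> kHeaderSizeShift) & kHeaderSizeMask;
      int32_t log2_es = layout_helper & kLog2ElemSizeMask;
      int64_t size = header + ((int64_t)array_length << log2_es);
      // Round up to the object alignment, matching the add/and pair in the LIR.
      return (size + round_mask) & ~(int64_t)round_mask;
    }

The per-bit shift loop in the LIR exists only because C1 lacks a variable-count shift_left; the scalar `<<` above is what that loop computes one bit at a time.
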
*** 3046,3055 **** --- 3194,3206 ----
    case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
    case vmIntrinsics::_isInstance:     do_isInstance(x);        break;
    case vmIntrinsics::_isPrimitive:    do_isPrimitive(x);       break;
    case vmIntrinsics::_getClass:       do_getClass(x);          break;
    case vmIntrinsics::_currentThread:  do_currentThread(x);     break;
+   case vmIntrinsics::_addressOf:      do_addressOf(x);         break;
+   case vmIntrinsics::_sizeOf:         do_sizeOf(x);            break;
+   case vmIntrinsics::_getReferencedObjects: do_getReferencedObjects(x); break;
  
    case vmIntrinsics::_dlog:           // fall through
    case vmIntrinsics::_dlog10:         // fall through
    case vmIntrinsics::_dabs:           // fall through
    case vmIntrinsics::_dsqrt:          // fall through
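
Of the three intrinsics dispatched above, _addressOf has the simplest semantics: per the first hunk, the generated LIR moves the oop into the result register and rebases it by Universe::non_heap_offset(), or produces -1 when the RuntimeAddressOf flag is off. A rough scalar C++ equivalent, with the VM-internal flag and offset passed in as parameters since they are not visible outside the VM (names illustrative):

    #include <cstdint>

    // Sketch of what do_addressOf() emits. 'non_heap_offset' stands in for
    // Universe::non_heap_offset(); 'runtime_address_of' mirrors the patch's
    // RuntimeAddressOf flag.
    int64_t address_of_sketch(const void* obj, intptr_t non_heap_offset,
                              bool runtime_address_of) {
      if (!runtime_address_of) {
        return -1; // intrinsic disabled: report a sentinel address
      }
      return (int64_t)((intptr_t)obj + non_heap_offset);
    }
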