
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

*** 1,7 ****
  /*
!  * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 30,39 ****
--- 30,40 ----
  #include "c1/c1_MacroAssembler.hpp"
  #include "c1/c1_Runtime1.hpp"
  #include "c1/c1_ValueStack.hpp"
  #include "ci/ciArrayKlass.hpp"
  #include "ci/ciInstance.hpp"
+ #include "ci/ciValueKlass.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/cardTableBarrierSet.hpp"
  #include "gc/shared/collectedHeap.hpp"
  #include "nativeInst_x86.hpp"
  #include "oops/objArrayKlass.hpp"
*** 197,207 ****
      __ push_reg(opr->as_register_lo());
    } else if (opr->is_stack()) {
      __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
    } else if (opr->is_constant()) {
      LIR_Const* const_opr = opr->as_constant_ptr();
!     if (const_opr->type() == T_OBJECT) {
        __ push_oop(const_opr->as_jobject());
      } else if (const_opr->type() == T_INT) {
        __ push_jint(const_opr->as_jint());
      } else {
        ShouldNotReachHere();
--- 198,208 ----
      __ push_reg(opr->as_register_lo());
    } else if (opr->is_stack()) {
      __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
    } else if (opr->is_constant()) {
      LIR_Const* const_opr = opr->as_constant_ptr();
!     if (const_opr->type() == T_OBJECT || const_opr->type() == T_VALUETYPE) {
        __ push_oop(const_opr->as_jobject());
      } else if (const_opr->type() == T_INT) {
        __ push_jint(const_opr->as_jint());
      } else {
        ShouldNotReachHere();
*** 287,297 ****
    // rcx: osr buffer
    //
    // build frame
    ciMethod* m = compilation()->method();
!   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
  
    // OSR buffer is
    //
    // locals[nlocals-1..0]
    // monitors[0..number_of_locks]
--- 288,299 ----
    // rcx: osr buffer
    //
    // build frame
    ciMethod* m = compilation()->method();
!   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(),
!                  needs_stack_repair(), NULL);
  
    // OSR buffer is
    //
    // locals[nlocals-1..0]
    // monitors[0..number_of_locks]
*** 484,494 ****
    if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
      __ mov(rax, rbx);  // Restore the exception
    }
  
    // remove the activation and dispatch to the unwind handler
!   __ remove_frame(initial_frame_size_in_bytes());
    __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
  
    // Emit the slow path assembly
    if (stub != NULL) {
      stub->emit_code(this);
--- 486,496 ----
    if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
      __ mov(rax, rbx);  // Restore the exception
    }
  
    // remove the activation and dispatch to the unwind handler
!   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
    __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
  
    // Emit the slow path assembly
    if (stub != NULL) {
      stub->emit_code(this);
*** 530,541 ****
    assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
    if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
      assert(result->fpu() == 0, "result must already be on TOS");
    }
  
    // Pop the stack before the safepoint code
!   __ remove_frame(initial_frame_size_in_bytes());
  
    if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
      __ reserved_stack_check();
    }
--- 532,562 ----
    assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
    if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
      assert(result->fpu() == 0, "result must already be on TOS");
    }
  
+   ciMethod* method = compilation()->method();
+   if (ValueTypeReturnedAsFields && method->signature()->returns_never_null()) {
+     ciType* return_type = method->return_type();
+     if (return_type->is_valuetype()) {
+       ciValueKlass* vk = return_type->as_value_klass();
+       if (vk->can_be_returned_as_fields()) {
+ #ifndef _LP64
+         Unimplemented();
+ #else
+         address unpack_handler = vk->unpack_handler();
+         assert(unpack_handler != NULL, "must be");
+         __ call(RuntimeAddress(unpack_handler));
+         // At this point, rax points to the value object (for interpreter or C1 caller).
+         // The fields of the object are copied into registers (for C2 caller).
+ #endif
+       }
+     }
+   }
+ 
    // Pop the stack before the safepoint code
!   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
  
    if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
      __ reserved_stack_check();
    }
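Reviewer note (not part of the webrev): the hunk above wires C1's epilogue into the scalarized return convention. When ValueTypeReturnedAsFields is on and the method provably returns a non-null value type whose klass supports it, the generated code calls the klass's unpack handler so rax still holds the buffered oop (for interpreter/C1 callers) while the fields are also available in registers (for C2 callers). A minimal standalone C++ sketch of that dual-representation idea, using hypothetical types that are not HotSpot's:

  #include <cstdio>

  // Hypothetical model of a "returned as fields" value: the callee produces
  // both a buffered (heap) representation and the raw field values, and each
  // kind of caller consumes the representation it understands.
  struct PointValue { long x, y; };

  struct DualReturn {
    PointValue  fields;  // what an optimized (C2-like) caller reads: plain registers
    PointValue* buffer;  // what an interpreter/C1-like caller reads: an object pointer
  };

  static PointValue g_heap_copy;  // stand-in for a heap-allocated buffer

  DualReturn make_point(long x, long y) {
    PointValue v{x, y};
    g_heap_copy = v;               // "buffering": materialize an object
    return DualReturn{v, &g_heap_copy};
  }

  int main() {
    DualReturn r = make_point(1, 2);
    std::printf("fields: %ld %ld\n", r.fields.x, r.fields.y);   // scalarized path
    std::printf("buffer: %ld %ld\n", r.buffer->x, r.buffer->y); // buffered path
    return 0;
  }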
*** 569,578 ****
--- 590,603 ----
    }
    __ ret(0);
  }
  
+ int LIR_Assembler::store_value_type_fields_to_buf(ciValueKlass* vk) {
+   return (__ store_value_type_fields_to_buf(vk, false));
+ }
+ 
  int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
    guarantee(info != NULL, "Shouldn't be NULL");
    int offset = __ offset();
    if (SafepointMechanism::uses_thread_local_poll()) {
  #ifdef _LP64
*** 643,652 ****
--- 668,678 ----
        __ movptr(dest->as_register_hi(), c->as_jint_hi());
  #endif // _LP64
        break;
      }
  
+     case T_VALUETYPE: // Fall through
      case T_OBJECT: {
        if (patch_code != lir_patch_none) {
          jobject2reg_with_patching(dest->as_register(), info);
        } else {
          __ movoop(dest->as_register(), c->as_jobject());
*** 725,734 ****
--- 751,761 ----
      case T_ADDRESS:
        __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()),
                  c->as_jint_bits());
        break;
  
+     case T_VALUETYPE: // Fall through
      case T_OBJECT:
        __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
        break;
  
      case T_LONG:  // fall through
*** 764,773 ****
--- 791,801 ----
      case T_ADDRESS:
        __ movptr(as_Address(addr), c->as_jint_bits());
        break;
  
+     case T_VALUETYPE: // fall through
      case T_OBJECT:  // fall through
      case T_ARRAY:
        if (c->as_jobject() == NULL) {
          if (UseCompressedOops && !wide) {
            __ movl(as_Address(addr), (int32_t)NULL_WORD);
*** 852,869 ****
        move_regs(src->as_register_lo(), dest->as_register());
        return;
      }
  #endif
      assert(src->is_single_cpu(), "must match");
!     if (src->type() == T_OBJECT) {
        __ verify_oop(src->as_register());
      }
      move_regs(src->as_register(), dest->as_register());
  
    } else if (dest->is_double_cpu()) {
  #ifdef _LP64
!     if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
        // Surprising to me but we can see move of a long to t_object
        __ verify_oop(src->as_register());
        move_regs(src->as_register(), dest->as_register_lo());
        return;
      }
--- 880,897 ----
        move_regs(src->as_register_lo(), dest->as_register());
        return;
      }
  #endif
      assert(src->is_single_cpu(), "must match");
!     if (src->type() == T_OBJECT || src->type() == T_VALUETYPE) {
        __ verify_oop(src->as_register());
      }
      move_regs(src->as_register(), dest->as_register());
  
    } else if (dest->is_double_cpu()) {
  #ifdef _LP64
!     if (src->type() == T_OBJECT || src->type() == T_ARRAY || src->type() == T_VALUETYPE) {
        // Surprising to me but we can see move of a long to t_object
        __ verify_oop(src->as_register());
        move_regs(src->as_register(), dest->as_register_lo());
        return;
      }
*** 930,940 ****
    assert(src->is_register(), "should not call otherwise");
    assert(dest->is_stack(), "should not call otherwise");
  
    if (src->is_single_cpu()) {
      Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
!     if (type == T_OBJECT || type == T_ARRAY) {
        __ verify_oop(src->as_register());
        __ movptr (dst, src->as_register());
      } else if (type == T_METADATA) {
        __ movptr (dst, src->as_register());
      } else {
--- 958,968 ----
    assert(src->is_register(), "should not call otherwise");
    assert(dest->is_stack(), "should not call otherwise");
  
    if (src->is_single_cpu()) {
      Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
!     if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) {
        __ verify_oop(src->as_register());
        __ movptr (dst, src->as_register());
      } else if (type == T_METADATA) {
        __ movptr (dst, src->as_register());
      } else {
*** 976,986 ****
  void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                              LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
    LIR_Address* to_addr = dest->as_address_ptr();
    PatchingStub* patch = NULL;
    Register compressed_src = rscratch1;
  
!   if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(src->as_register());
  #ifdef _LP64
      if (UseCompressedOops && !wide) {
        __ movptr(compressed_src, src->as_register());
        __ encode_heap_oop(compressed_src);
--- 1004,1014 ----
  void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                              LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
    LIR_Address* to_addr = dest->as_address_ptr();
    PatchingStub* patch = NULL;
    Register compressed_src = rscratch1;
  
!   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
      __ verify_oop(src->as_register());
  #ifdef _LP64
      if (UseCompressedOops && !wide) {
        __ movptr(compressed_src, src->as_register());
        __ encode_heap_oop(compressed_src);
*** 1021,1030 ****
--- 1049,1059 ----
        else                    __ fst_d (as_Address(to_addr));
      }
      break;
    }
  
+   case T_VALUETYPE: // fall through
    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
*** 1111,1121 ****
  void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
    assert(src->is_stack(), "should not call otherwise");
    assert(dest->is_register(), "should not call otherwise");
  
    if (dest->is_single_cpu()) {
!     if (type == T_ARRAY || type == T_OBJECT) {
        __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
        __ verify_oop(dest->as_register());
      } else if (type == T_METADATA) {
        __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      } else {
--- 1140,1150 ----
  void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
    assert(src->is_stack(), "should not call otherwise");
    assert(dest->is_register(), "should not call otherwise");
  
    if (dest->is_single_cpu()) {
!     if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
        __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
        __ verify_oop(dest->as_register());
      } else if (type == T_METADATA) {
        __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      } else {
*** 1152,1162 ****
  }
  
  
  void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
    if (src->is_single_stack()) {
!     if (type == T_OBJECT || type == T_ARRAY) {
        __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
        __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
  #ifndef _LP64
        __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
--- 1181,1191 ----
  }
  
  
  void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
    if (src->is_single_stack()) {
!     if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) {
        __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
        __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
  #ifndef _LP64
        __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
*** 1191,1201 ****
    assert(dest->is_register(), "should not call otherwise");
  
    LIR_Address* addr = src->as_address_ptr();
    Address from_addr = as_Address(addr);
  
!   if (addr->base()->type() == T_OBJECT) {
      __ verify_oop(addr->base()->as_pointer_register());
    }
  
    switch (type) {
      case T_BOOLEAN: // fall through
--- 1220,1230 ----
    assert(dest->is_register(), "should not call otherwise");
  
    LIR_Address* addr = src->as_address_ptr();
    Address from_addr = as_Address(addr);
  
!   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_VALUETYPE) {
      __ verify_oop(addr->base()->as_pointer_register());
    }
  
    switch (type) {
      case T_BOOLEAN: // fall through
*** 1244,1253 ****
--- 1273,1283 ----
          __ fld_d(from_addr);
        }
        break;
      }
  
+     case T_VALUETYPE: // fall through
      case T_OBJECT:  // fall through
      case T_ARRAY:   // fall through
        if (UseCompressedOops && !wide) {
          __ movl(dest->as_register(), from_addr);
        } else {
*** 1353,1363 ****
    if (patch != NULL) {
      patching_epilog(patch, patch_code, addr->base()->as_register(), info);
    }
  
!   if (type == T_ARRAY || type == T_OBJECT) {
  #ifdef _LP64
      if (UseCompressedOops && !wide) {
        __ decode_heap_oop(dest->as_register());
      }
  #endif
--- 1383,1393 ----
    if (patch != NULL) {
      patching_epilog(patch, patch_code, addr->base()->as_register(), info);
    }
  
!   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
  #ifdef _LP64
      if (UseCompressedOops && !wide) {
        __ decode_heap_oop(dest->as_register());
      }
  #endif
*** 1367,1378 ****
--- 1397,1414 ----
      __ verify_oop(dest->as_register());
    }
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
  #ifdef _LP64
    if (UseCompressedClassPointers) {
+     __ andl(dest->as_register(), oopDesc::compressed_klass_mask());
      __ decode_klass_not_null(dest->as_register());
+   } else {
+     __ shlq(dest->as_register(), oopDesc::storage_props_nof_bits);
+     __ shrq(dest->as_register(), oopDesc::storage_props_nof_bits);
    }
+ #else
+   __ andl(dest->as_register(), oopDesc::wide_klass_mask());
  #endif
    }
  }
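Reviewer note (not part of the webrev): with Valhalla, the upper bits of the klass word carry the array storage properties, so a raw T_ADDRESS load of the klass field must strip them. The compressed-class path uses an explicit andl with compressed_klass_mask(); the uncompressed path clears the top storage_props_nof_bits with a shift-left/shift-right pair. A standalone sketch showing the two maskings are equivalent (the bit count is illustrative, not HotSpot's value):

  #include <cstdint>
  #include <cstdio>

  // Illustrative only: clear the top N "storage property" bits of a klass word
  // the same two ways the hunk above does.
  static const int kStoragePropsBits = 3;  // hypothetical; HotSpot derives its own

  uint64_t clear_props_by_shift(uint64_t klass_word) {
    // shlq/shrq pair: shifting left then right by N zeroes the top N bits.
    klass_word <<= kStoragePropsBits;
    klass_word >>= kStoragePropsBits;
    return klass_word;
  }

  uint64_t clear_props_by_mask(uint64_t klass_word) {
    // Equivalent and-style mask with the low (64 - N) bits set.
    const uint64_t mask = (~UINT64_C(0)) >> kStoragePropsBits;
    return klass_word & mask;
  }

  int main() {
    uint64_t w = (UINT64_C(5) << 61) | 0x1234;  // props bits set + pointer bits
    std::printf("%llx %llx\n",                  // both print 1234
                (unsigned long long)clear_props_by_shift(w),
                (unsigned long long)clear_props_by_mask(w));
    return 0;
  }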
*** 1590,1600 ****
  void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
    Register len =  op->len()->as_register();
    LP64_ONLY( __ movslq(len, len); )
  
!   if (UseSlowPath ||
        (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
        (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
      __ jmp(*op->stub()->entry());
    } else {
      Register tmp1 = op->tmp1()->as_register();
--- 1626,1636 ----
  void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
    Register len =  op->len()->as_register();
    LP64_ONLY( __ movslq(len, len); )
  
!   if (UseSlowPath || op->type() == T_VALUETYPE ||
        (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
        (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
      __ jmp(*op->stub()->entry());
    } else {
      Register tmp1 = op->tmp1()->as_register();
*** 1688,1711 ****
      select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
    }
  
    assert_different_registers(obj, k_RInfo, klass_RInfo);
  
!   __ cmpptr(obj, (int32_t)NULL_WORD);
!   if (op->should_profile()) {
!     Label not_null;
!     __ jccb(Assembler::notEqual, not_null);
!     // Object is null; update MDO and exit
!     Register mdo  = klass_RInfo;
!     __ mov_metadata(mdo, md->constant_encoding());
!     Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
!     int header_bits = BitData::null_seen_byte_constant();
!     __ orb(data_addr, header_bits);
!     __ jmp(*obj_is_null);
!     __ bind(not_null);
!   } else {
!     __ jcc(Assembler::equal, *obj_is_null);
    }
  
    if (!k->is_loaded()) {
      klass2reg_with_patching(k_RInfo, op->info_for_patch());
    } else {
--- 1724,1749 ----
      select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
    }
  
    assert_different_registers(obj, k_RInfo, klass_RInfo);
  
!   if (op->need_null_check()) {
!     __ cmpptr(obj, (int32_t)NULL_WORD);
!     if (op->should_profile()) {
!       Label not_null;
!       __ jccb(Assembler::notEqual, not_null);
!       // Object is null; update MDO and exit
!       Register mdo  = klass_RInfo;
!       __ mov_metadata(mdo, md->constant_encoding());
!       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
!       int header_bits = BitData::null_seen_byte_constant();
!       __ orb(data_addr, header_bits);
!       __ jmp(*obj_is_null);
!       __ bind(not_null);
!     } else {
!       __ jcc(Assembler::equal, *obj_is_null);
!     }
    }
  
    if (!k->is_loaded()) {
      klass2reg_with_patching(k_RInfo, op->info_for_patch());
    } else {
*** 1912,1921 ****
--- 1950,2066 ----
      ShouldNotReachHere();
    }
  }
  
+ void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
+   // We are loading/storing an array that *may* be a flattened array (the declared type
+   // Object[], interface[], or VT?[]). If this array is flattened, take the slow path.
+ 
+   __ load_storage_props(op->tmp()->as_register(), op->array()->as_register());
+   __ testb(op->tmp()->as_register(), ArrayStorageProperties::flattened_value);
+   __ jcc(Assembler::notZero, *op->stub()->entry());
+   if (!op->value()->is_illegal()) {
+     // We are storing into the array.
+     Label skip;
+     __ testb(op->tmp()->as_register(), ArrayStorageProperties::null_free_value);
+     __ jcc(Assembler::zero, skip);
+     // The array is not flattened, but it is null_free. If we are storing
+     // a null, take the slow path (which will throw NPE).
+     __ cmpptr(op->value()->as_register(), (int32_t)NULL_WORD);
+     __ jcc(Assembler::zero, *op->stub()->entry());
+     __ bind(skip);
+   }
+ }
+ 
+ void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
+   // This is called when we use aastore into an array declared as "[LVT;",
+   // where we know VT is not flattenable (due to ValueArrayElemMaxFlatOops, etc).
+   // However, we need to do a NULL check if the actual array is a "[QVT;".
+ 
+   __ load_storage_props(op->tmp()->as_register(), op->array()->as_register());
+   __ testb(op->tmp()->as_register(), ArrayStorageProperties::null_free_value);
+ }
+ 
+ void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
+   Label L_oops_equal;
+   Label L_oops_not_equal;
+   Label L_end;
+ 
+   Register left  = op->left()->as_register();
+   Register right = op->right()->as_register();
+ 
+   __ cmpptr(left, right);
+   __ jcc(Assembler::equal, L_oops_equal);
+ 
+   // (1) Null check -- if one of the operands is null, the other must not be null (because
+   //     the two references are not equal), so they are not substitutable,
+   //     FIXME: do null check only if the operand is nullable
+   {
+     __ cmpptr(left, (int32_t)NULL_WORD);
+     __ jcc(Assembler::equal, L_oops_not_equal);
+ 
+     __ cmpptr(right, (int32_t)NULL_WORD);
+     __ jcc(Assembler::equal, L_oops_not_equal);
+   }
+ 
+   ciKlass* left_klass = op->left_klass();
+   ciKlass* right_klass = op->right_klass();
+ 
+   // (2) Value object check -- if either of the operands is not a value object,
+   //     they are not substitutable. We do this only if we are not sure that the
+   //     operands are value objects
+   if ((left_klass == NULL || right_klass == NULL) ||  // The klass is still unloaded, or came from a Phi node.
+       !left_klass->is_valuetype() || !right_klass->is_valuetype()) {
+     Register tmp1 = op->tmp1()->as_register();
+     __ movptr(tmp1, (intptr_t)markOopDesc::always_locked_pattern);
+     __ andl(tmp1, Address(left,  oopDesc::mark_offset_in_bytes()));
+     __ andl(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
+     __ cmpptr(tmp1, (intptr_t)markOopDesc::always_locked_pattern);
+     __ jcc(Assembler::notEqual, L_oops_not_equal);
+   }
+ 
+   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
+   if (left_klass != NULL && left_klass->is_valuetype() && left_klass == right_klass) {
+     // No need to load klass -- the operands are statically known to be the same value klass.
+     __ jmp(*op->stub()->entry());
+   } else {
+     Register left_klass_op  = op->left_klass_op()->as_register();
+     Register right_klass_op = op->right_klass_op()->as_register();
+ 
+     if (UseCompressedOops) {
+       __ movl(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
+       __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
+       __ cmpl(left_klass_op, right_klass_op);
+     } else {
+       __ movptr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
+       __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
+       __ cmpptr(left_klass_op, right_klass_op);
+     }
+ 
+     __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check
+     // fall through to L_oops_not_equal
+   }
+ 
+   __ bind(L_oops_not_equal);
+   move(op->not_equal_result(), op->result_opr());
+   __ jmp(L_end);
+ 
+   __ bind(L_oops_equal);
+   move(op->equal_result(), op->result_opr());
+   __ jmp(L_end);
+ 
+   // We've returned from the stub. RAX contains 0x0 IFF the two
+   // operands are not substitutable. (Don't compare against 0x1 in case the
+   // C compiler is naughty)
+   __ bind(*op->stub()->continuation());
+   __ cmpl(rax, 0);
+   __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
+   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
+   // fall-through
+   __ bind(L_end);
+ }
  
  void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
    if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
      assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
      assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
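Reviewer note (not part of the webrev): emit_opSubstitutabilityCheck implements the Valhalla acmp semantics. The decision ladder is easier to audit in plain code; here is a minimal standalone C++ model (the object layout and names are illustrative, and the field-by-field comparison done by the runtime stub is stubbed out):

  #include <cstdio>

  // Illustrative object model: a "value object" is recognized at runtime by a
  // special mark-word pattern; here that is just a flag.
  struct Obj {
    const void* klass;     // dynamic class of the object
    bool is_value_object;  // models markOopDesc::always_locked_pattern
  };

  // Stand-in for the slow-path stub: a real implementation compares the two
  // objects field by field (recursively for nested value fields).
  static bool fields_substitutable(const Obj*, const Obj*) { return true; }

  // Mirrors the ladder in emit_opSubstitutabilityCheck.
  bool substitutable(const Obj* left, const Obj* right) {
    if (left == right) return true;                         // same reference
    if (left == nullptr || right == nullptr) return false;  // exactly one is null
    if (!left->is_value_object || !right->is_value_object) return false;
    if (left->klass != right->klass) return false;          // different klasses
    return fields_substitutable(left, right);               // slow check
  }

  int main() {
    int k1, k2;  // distinct addresses standing in for two klasses
    Obj a{&k1, true}, b{&k1, true}, c{&k2, true};
    std::printf("%d %d %d\n", substitutable(&a, &b), substitutable(&a, &c),
                substitutable(&a, nullptr));  // prints: 1 0 0
    return 0;
  }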
*** 1972,1981 ****
--- 2117,2141 ----
    } else {
      Unimplemented();
    }
  }
  
+ void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
+   assert(dst->is_cpu_register(), "must be");
+   assert(dst->type() == src->type(), "must be");
+ 
+   if (src->is_cpu_register()) {
+     reg2reg(src, dst);
+   } else if (src->is_stack()) {
+     stack2reg(src, dst, dst->type());
+   } else if (src->is_constant()) {
+     const2reg(src, dst, lir_patch_none, NULL);
+   } else {
+     ShouldNotReachHere();
+   }
+ }
+ 
  void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
    Assembler::Condition acond, ncond;
    switch (condition) {
      case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
*** 2508,2518 ****
      }
  #endif // _LP64
    } else {
  #ifdef _LP64
      Register r_lo;
!     if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
  #else
--- 2668,2678 ----
      }
  #endif // _LP64
    } else {
  #ifdef _LP64
      Register r_lo;
!     if (right->type() == T_OBJECT || right->type() == T_ARRAY || right->type() == T_VALUETYPE) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
  #else
*** 2621,2649 ****
  void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
    if (opr1->is_single_cpu()) {
      Register reg1 = opr1->as_register();
      if (opr2->is_single_cpu()) {
        // cpu register - cpu register
!       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
          __ cmpoop(reg1, opr2->as_register());
        } else {
!         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
          __ cmpl(reg1, opr2->as_register());
        }
      } else if (opr2->is_stack()) {
        // cpu register - stack
!       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
          __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        } else {
          __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        }
      } else if (opr2->is_constant()) {
        // cpu register - constant
        LIR_Const* c = opr2->as_constant_ptr();
        if (c->type() == T_INT) {
          __ cmpl(reg1, c->as_jint());
!       } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
          // In 64bit oops are single register
          jobject o = c->as_jobject();
          if (o == NULL) {
            __ cmpptr(reg1, (int32_t)NULL_WORD);
          } else {
--- 2781,2809 ----
  void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
    if (opr1->is_single_cpu()) {
      Register reg1 = opr1->as_register();
      if (opr2->is_single_cpu()) {
        // cpu register - cpu register
!       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
          __ cmpoop(reg1, opr2->as_register());
        } else {
!         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_VALUETYPE, "cmp int, oop?");
          __ cmpl(reg1, opr2->as_register());
        }
      } else if (opr2->is_stack()) {
        // cpu register - stack
!       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
          __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        } else {
          __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        }
      } else if (opr2->is_constant()) {
        // cpu register - constant
        LIR_Const* c = opr2->as_constant_ptr();
        if (c->type() == T_INT) {
          __ cmpl(reg1, c->as_jint());
!       } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
          // In 64bit oops are single register
          jobject o = c->as_jobject();
          if (o == NULL) {
            __ cmpptr(reg1, (int32_t)NULL_WORD);
          } else {
*** 2739,2749 ****
      __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
  
    } else if (opr1->is_address() && opr2->is_constant()) {
      LIR_Const* c = opr2->as_constant_ptr();
  #ifdef _LP64
!     if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
        __ movoop(rscratch1, c->as_jobject());
      }
  #endif // LP64
      if (op->info() != NULL) {
--- 2899,2909 ----
      __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
  
    } else if (opr1->is_address() && opr2->is_constant()) {
      LIR_Const* c = opr2->as_constant_ptr();
  #ifdef _LP64
!     if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
        __ movoop(rscratch1, c->as_jobject());
      }
  #endif // LP64
      if (op->info() != NULL) {
*** 2751,2761 ****
    }
    // special case: address - constant
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
!   } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
  #ifdef _LP64
      // %%% Make this explode if addr isn't reachable until we figure out a
      // better strategy by giving noreg as the temp for as_Address
      __ cmpoop(rscratch1, as_Address(addr, noreg));
  #else
--- 2911,2921 ----
    }
    // special case: address - constant
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
!   } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
  #ifdef _LP64
      // %%% Make this explode if addr isn't reachable until we figure out a
      // better strategy by giving noreg as the temp for as_Address
      __ cmpoop(rscratch1, as_Address(addr, noreg));
  #else
*** 2830,2846 ****
  
  void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
    assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
           "must be aligned");
    __ call(AddressLiteral(op->addr(), rtype));
!   add_call_info(code_offset(), op->info());
  }
  
  
  void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
    __ ic_call(op->addr());
!   add_call_info(code_offset(), op->info());
    assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
           "must be aligned");
  }
--- 2990,3006 ----
  
  void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
    assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
           "must be aligned");
    __ call(AddressLiteral(op->addr(), rtype));
!   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
  }
  
  
  void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
    __ ic_call(op->addr());
!   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
    assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
           "must be aligned");
  }
*** 3032,3041 ****
--- 3192,3218 ----
    assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
    __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
  }
  
+ void LIR_Assembler::arraycopy_valuetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
+   if (null_check) {
+     __ testptr(obj, obj);
+     __ jcc(Assembler::zero, *slow_path->entry());
+   }
+   __ load_storage_props(tmp, obj);
+   if (is_dest) {
+     // We also take the slow path if it's a null_free destination array, just in case the source array
+     // contains NULLs.
+     __ testb(tmp, ArrayStorageProperties::flattened_value | ArrayStorageProperties::null_free_value);
+   } else {
+     __ testb(tmp, ArrayStorageProperties::flattened_value);
+   }
+   __ jcc(Assembler::notEqual, *slow_path->entry());
+ }
+ 
+ 
  // This code replaces a call to arraycopy; no exception may
  // be thrown in this code, they must be thrown in the System.arraycopy
  // activation frame; we could save some checks if this would not be the case
  void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
    ciArrayKlass* default_type = op->expected_type();
*** 3050,3060 ****
    __ resolve(ACCESS_WRITE, dst);
  
    CodeStub* stub = op->stub();
    int flags = op->flags();
    BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
!   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
  
    // if we don't know anything, just go through the generic arraycopy
    if (default_type == NULL) {
      // save outgoing arguments on stack in case call to System.arraycopy is needed
      // HACK ALERT. This code used to push the parameters in a hardwired fashion
--- 3227,3251 ----
    __ resolve(ACCESS_WRITE, dst);
  
    CodeStub* stub = op->stub();
    int flags = op->flags();
    BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
!   if (basic_type == T_ARRAY || basic_type == T_VALUETYPE) basic_type = T_OBJECT;
! 
!   if (flags & LIR_OpArrayCopy::always_slow_path) {
!     __ jmp(*stub->entry());
!     __ bind(*stub->continuation());
!     return;
!   }
! 
!   if (flags & LIR_OpArrayCopy::src_valuetype_check) {
!     arraycopy_valuetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
!   }
! 
!   if (flags & LIR_OpArrayCopy::dst_valuetype_check) {
!     arraycopy_valuetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
!   }
  
    // if we don't know anything, just go through the generic arraycopy
    if (default_type == NULL) {
      // save outgoing arguments on stack in case call to System.arraycopy is needed
      // HACK ALERT. This code used to push the parameters in a hardwired fashion
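Reviewer note (not part of the webrev): emit_arraycopy now consults the new LIR_OpArrayCopy flags before any of the pre-existing checks: always_slow_path punts straight to the stub, and the src/dst valuetype checks read the array's storage properties and bail out for flattened arrays, or for a null-free destination (since the source may contain nulls the destination cannot accept). The standalone sketch below models just that decision; the flag values are made up, not HotSpot's:

  #include <cstdio>

  // Illustrative storage-property bits mirroring the checks in the hunk above.
  enum ArrayProps : unsigned {
    kFlattened = 1u << 0,
    kNullFree  = 1u << 1,
  };

  // Decide whether an arraycopy must take the slow path, following the same
  // rules as arraycopy_valuetype_check: a flattened source or destination
  // always bails out; a null-free destination bails out too, in case the
  // source contains nulls.
  bool needs_slow_path(unsigned src_props, unsigned dst_props) {
    if (src_props & kFlattened) return true;
    if (dst_props & (kFlattened | kNullFree)) return true;
    return false;
  }

  int main() {
    std::printf("%d\n", needs_slow_path(0, kNullFree));   // 1: dst is null-free
    std::printf("%d\n", needs_slow_path(kFlattened, 0));  // 1: src is flattened
    std::printf("%d\n", needs_slow_path(0, 0));           // 0: fast checks proceed
    return 0;
  }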