    l = in_jvms->scl_size();
    out_jvms->set_scloff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Finish the new jvms.
    out_jvms->set_endoff(p);

    assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
    assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
    assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
    assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
    assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
    assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");

    // Update the two tail pointers in parallel.
    out_jvms = out_jvms->caller();
    in_jvms  = in_jvms->caller();
  }

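  // PEA: the debug edges copied above may still refer to virtual objects;
  // replace them with their materialized values where available.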
  PEAState& as = youngest_jvms->alloc_state();
  backfill_materialized(call, TypeFunc::Parms, call->req(), as);
  assert(debug_ptr == non_debug_edges, "debug info must fit exactly");

  // Test the correctness of JVMState::debug_xxx accessors:
  assert(call->jvms()->debug_start() == non_debug_edges, "");
  assert(call->jvms()->debug_end()   == call->req(), "");
  assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
}

bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
  Bytecodes::Code code = java_bc();
  if (code == Bytecodes::_wide) {
    code = method()->java_code_at_bci(bci() + 1);
  }

  if (code != Bytecodes::_illegal) {
    depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
  }

  auto rsize = [&]() {
    assert(code != Bytecodes::_illegal, "code is illegal!");
    // ... (elided: remainder of compute_stack_effects() and intervening code,
    //      up to the tail of shared_lock()) ...

  // lock has no side-effects, sets few values
  set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);

  insert_mem_bar(Op_MemBarAcquireLock);

  // Add this to the worklist so that the lock can be eliminated
  record_for_igvn(lock);

#ifndef PRODUCT
  if (PrintLockStatistics) {
    // Update the counter for this lock. Don't bother using an atomic
    // operation since we don't require absolute accuracy.
    lock->create_lock_counter(map()->jvms());
    increment_counter(lock->counter()->addr());
  }
#endif

  return flock;
}

// Clone a LockNode for PEA materialization.
// A LockNode is a safepoint, so it is location-sensitive; we cannot simply
// clone it. Instead we create a new one at the current position, using the
// current JVMState, and mark the bytecode as non-reexecutable.
//
// It is worth noting that we reuse the BoxLockNode: it represents the
// monitor's slot on the stack. PhaseMacroExpand::mark_eliminated_box() cannot
// eliminate a BoxLockNode and its associated AbstractLockNodes until it is
// proven that the 'eliminated' BoxLockNode is used exclusively.
void GraphKit::clone_shared_lock(Node* box, Node* obj) {
  kill_dead_locals();

  Node* mem = reset_memory();

  FastLockNode* flock = _gvn.transform(new FastLockNode(0, obj, box))->as_FastLock();
  const TypeFunc* tf = LockNode::lock_type();
  LockNode* lock = new LockNode(C, tf);

  lock->set_req(TypeFunc::Control,   control());
  lock->set_req(TypeFunc::Memory ,   mem);
  lock->set_req(TypeFunc::I_O    ,   top());   // does no i/o
  lock->set_req(TypeFunc::FramePtr,  frameptr());
  lock->set_req(TypeFunc::ReturnAdr, top());

  lock->set_req(TypeFunc::Parms + 0, obj);
  lock->set_req(TypeFunc::Parms + 1, box);
  lock->set_req(TypeFunc::Parms + 2, flock);

  // We cannot re-execute the current bytecode: it is not a monitorenter.
  jvms()->set_should_reexecute(false);
  add_safepoint_edges(lock);

  lock = _gvn.transform(lock)->as_Lock();

  // lock has no side-effects, sets few values
  set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);

  insert_mem_bar(Op_MemBarAcquireLock);

  // Add this to the worklist so that the lock can be eliminated
  record_for_igvn(lock);
}

//------------------------------shared_unlock----------------------------------
// Emit unlocking code.
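// If 'preserve_monitor' is true, the monitor is not popped from the debug
// info (used by PEA).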
void GraphKit::shared_unlock(Node* box, Node* obj, bool preserve_monitor) {
  // bci is either a monitorenter bc or InvocationEntryBci
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");

  if (!GenerateSynchronizationCode) {
    return;
  }
  if (stopped()) {                // Dead monitor?
    if (!preserve_monitor) {
      map()->pop_monitor();       // Kill monitor from debug info
    }
    return;
  }

  // Memory barrier to avoid floating things down past the locked region
  insert_mem_bar(Op_MemBarReleaseLock);

  const TypeFunc* tf = OptoRuntime::complete_monitor_exit_Type();
  UnlockNode* unlock = new UnlockNode(C, tf);
#ifdef ASSERT
  unlock->set_dbg_jvms(sync_jvms());
#endif
  uint raw_idx = Compile::AliasIdxRaw;
  unlock->init_req(TypeFunc::Control,   control());
  unlock->init_req(TypeFunc::Memory ,   memory(raw_idx));
  unlock->init_req(TypeFunc::I_O    ,   top());   // does no i/o
  unlock->init_req(TypeFunc::FramePtr,  frameptr());
  unlock->init_req(TypeFunc::ReturnAdr, top());

  unlock->init_req(TypeFunc::Parms + 0, obj);
  unlock->init_req(TypeFunc::Parms + 1, box);
  unlock = _gvn.transform(unlock)->as_Unlock();

  Node* mem = reset_memory();

  // unlock has no side-effects, sets few values
  set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);

  if (!preserve_monitor) {
    // Kill monitor from debug info
    map()->pop_monitor();
  }
}

//-------------------------------get_layout_helper-----------------------------
// If the given klass is a constant or known to be an array,
// fetch the constant layout helper value into constant_value
// and return null. Otherwise, load the non-constant
// layout helper value, and return the node which represents it.
// This two-faced routine is useful because allocation sites
// almost always feature constant types.
Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
  const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
  if (!StressReflectiveCode && klass_t != nullptr) {
    bool xklass = klass_t->klass_is_exact();
    if (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM)) {
      jint lhelper;
      if (klass_t->isa_aryklassptr()) {
        BasicType elem = klass_t->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
        if (is_reference_type(elem, true)) {
          elem = T_OBJECT;
        }
        // ... (elided: compute lhelper and, when it is a constant, store it
        //      into constant_value and return null) ...
      }
    }
  }
  constant_value = Klass::_lh_neutral_value;  // put in a known value
  Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
  return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
}

// We just put in an allocate/initialize with a big raw-memory effect.
// Hook selected additional alias categories on the initialization.
static void hook_memory_on_init(GraphKit& kit, int alias_idx,
                                MergeMemNode* init_in_merge,
                                Node* init_out_raw) {
  DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
  assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");

  Node* prevmem = kit.memory(alias_idx);
  init_in_merge->set_memory_at(alias_idx, prevmem);
  kit.set_memory(init_out_raw, alias_idx);
}
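
//-----------------------set_output_for_allocation_common----------------------
// Shared tail of set_output_for_allocation() and materialize_object(): wires
// up the control, i_o and memory projections of the transformed AllocateNode
// and inserts the Initialize barrier.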
Node* GraphKit::set_output_for_allocation_common(AllocateNode* alloc,
                                                 const TypeOopPtr* oop_type,
                                                 bool deoptimize_on_exception) {
  int rawidx = Compile::AliasIdxRaw;
  Node* allocx = _gvn.transform(alloc);
  set_control(_gvn.transform(new ProjNode(allocx, TypeFunc::Control)));
  // create memory projection for i_o
  set_memory(_gvn.transform(new ProjNode(allocx, TypeFunc::Memory, true)), rawidx);
  make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);

  // create a memory projection as for the normal control path
  Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
  set_memory(malloc, rawidx);

  // a normal slow-call doesn't change i_o, but an allocation does
  // we create a separate i_o projection for the normal control path
  set_i_o(_gvn.transform(new ProjNode(allocx, TypeFunc::I_O, false)));
  Node* rawoop = _gvn.transform(new ProjNode(allocx, TypeFunc::Parms));

  // put in an initialization barrier
  InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
                                                 rawoop)->as_Initialize();
  assert(alloc->initialization() == init,  "2-way macro link must work");
  assert(init->allocation()      == alloc, "2-way macro link must work");

  // ... (elided: hook the oop_type alias categories on 'init' and cast rawoop
  //      to the Java-level oop 'javaoop') ...

#ifdef ASSERT
  { // Verify that the AllocateNode::Ideal_allocation recognizers work:
    assert(AllocateNode::Ideal_allocation(rawoop) == alloc,
           "Ideal_allocation works");
    assert(AllocateNode::Ideal_allocation(javaoop) == alloc,
           "Ideal_allocation works");
    if (alloc->is_AllocateArray()) {
      assert(AllocateArrayNode::Ideal_array_allocation(rawoop) == alloc->as_AllocateArray(),
             "Ideal_allocation works");
      assert(AllocateArrayNode::Ideal_array_allocation(javaoop) == alloc->as_AllocateArray(),
             "Ideal_allocation works");
    } else {
      assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
    }
  }
#endif //ASSERT

  return javaoop;
}

//---------------------------set_output_for_allocation-------------------------
Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
                                          const TypeOopPtr* oop_type,
                                          bool deoptimize_on_exception) {
  alloc->set_req(TypeFunc::FramePtr, frameptr());
  add_safepoint_edges(alloc);
  return set_output_for_allocation_common(alloc, oop_type, deoptimize_on_exception);
}

//---------------------------materialize_object--------------------------------
// Position-Agnostic Materialization
// ---------------------------------
// When PEA materializes a virtual object, it emits a cluster of nodes at the
// current position. Unlike ordinary "floating" nodes, an AllocateNode is a
// subclass of SafePointNode, so it depends on a JVMState, and the JVMState of
// the current position may not be suitable for the cloned AllocateNode.
//
// To ensure that we can safely embed these nodes at the current position, we
// take the following measures:
//
// 1. The debug edges and the JVMState of the cloned AllocateNode are not taken
//    from the current GraphKit; we copy them from the original AllocateNode.
//
// 2. We deoptimize on exception, so that a real exception is never mistakenly
//    dispatched to an exception handler of the current context.
//
Node* GraphKit::materialize_object(AllocateNode* alloc, const TypeOopPtr* oop_type) {
  Node* mem = reset_memory();
  AllocateNode* allocx = new AllocateNode(C, alloc->tf(), control(), mem, i_o(),
                                          alloc->in(AllocateNode::AllocSize),
                                          alloc->in(AllocateNode::KlassNode),
                                          alloc->in(AllocateNode::InitialTest));
  allocx->set_req(TypeFunc::FramePtr, frameptr());

  JVMState* out_jvms = alloc->jvms()->clone_shallow(C);
  out_jvms->bind_map(allocx);
  // copy all debug-info edges from the original AllocateNode
  for (uint i = allocx->req(); i < alloc->req(); ++i) {
    allocx->add_req(alloc->in(i));
  }

  JVMState* jvms = sync_jvms();
  // We cannot use PreserveJVMState here because 'this' is a Parse; we would
  // fail in jvms_in_sync(). Use a temporary GraphKit bound to allocx instead.
  GraphKit kit(allocx->jvms());
  kit.set_map_clone(allocx);

  Node* objx = kit.set_output_for_allocation_common(allocx, oop_type, true /*deoptimize_on_ex*/);

  // copy back the compile-time state to 'this'
  set_jvms(jvms);
  set_control(kit.control());
  set_i_o(kit.i_o());
  set_all_memory(kit.merged_memory());

  return objx;
}

//---------------------------new_instance--------------------------------------
// This routine takes a klass_node which may be constant (for a static type)
// or may be non-constant (for reflective code). It will work equally well
// for either, and the graph will fold nicely if the optimizer later reduces
// the type to a constant.
// The optional arguments are for specialized use by intrinsics:
//  - If 'extra_slow_test' is not null, it is an extra condition for the slow path.
//  - If 'return_size_val' is not null, report the total object size to the caller.
//  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
Node* GraphKit::new_instance(Node* klass_node,
                             Node* extra_slow_test,
                             Node* *return_size_val,
                             bool deoptimize_on_exception) {
  // Compute size in doublewords
  // The size is always an integral number of doublewords, represented
  // as a positive bytewise size stored in the klass's layout_helper.
  // The layout_helper also encodes (in a low bit) the need for a slow path.
  jint layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  int layout_is_con = (layout_val == nullptr);

  // ... (elided: remainder of new_instance() and the functions that follow) ...
}

Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
  if (!field->is_constant()) {
    return nullptr; // Field not marked as constant.
  }
  ciInstance* holder = nullptr;
  if (!field->is_static()) {
    ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
    if (const_oop != nullptr && const_oop->is_instance()) {
      holder = const_oop->as_instance();
    }
  }
  const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
                                                        /*is_unsigned_load=*/false);
  if (con_type != nullptr) {
    return makecon(con_type);
  }
  return nullptr;
}

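//--------------------------backfill_materialized------------------------------
// Scan the debug edges of 'map' in the range [begin, end) and replace every
// reference to a virtual object with its materialized value, as recorded in
// the PEA allocation state 'as'.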
void GraphKit::backfill_materialized(SafePointNode* map, uint begin, uint end, PEAState& as) {
  bool printed = false;

  for (uint i = begin; i < end; ++i) {
    Node* t = map->in(i);

    if (t != nullptr && t->is_CheckCastPP()) {
      AllocateNode* alloc = AllocateNode::Ideal_allocation(t);

      if (as.contains(alloc)) {
        Node* neww = as.get_materialized_value(alloc);
        if (neww != nullptr && neww != t) {
#ifndef PRODUCT
          if (PEAVerbose) {
            if (!printed) {
              map->dump();
              printed = true;
            }
            tty->print_cr("[PEA] replace %d with node %d", i, neww->_idx);
          }
#endif
          map->set_req(i, neww);
        }
      }
    }
  }
}