< prev index next >

src/hotspot/share/opto/library_call.cpp

Print this page

4700     // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
4701     // about the method being invoked should be attached to the call site to
4702     // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
4703     slow_call->set_override_symbolic_info(true);
4704   }
4705   set_arguments_for_java_call(slow_call);
4706   set_edges_for_java_call(slow_call);
4707   return slow_call;
4708 }
4709 
4710 
4711 /**
4712  * Build special case code for calls to hashCode on an object. This call may
4713  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4714  * slightly different code.
4715  */
4716 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4717   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4718   assert(!(is_virtual && is_static), "either virtual, special, or static");
4719 
4720   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4721 
4722   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4723   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4724   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4725   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4726   Node* obj = nullptr;
4727   if (!is_static) {
4728     // Check for hashing null object
4729     obj = null_check_receiver();
4730     if (stopped())  return true;        // unconditionally null
4731     result_reg->init_req(_null_path, top());
4732     result_val->init_req(_null_path, top());
4733   } else {
4734     // Do a null check, and return zero if null.
4735     // System.identityHashCode(null) == 0
4736     obj = argument(0);
4737     Node* null_ctl = top();
4738     obj = null_check_oop(obj, &null_ctl);
4739     result_reg->init_req(_null_path, null_ctl);
4740     result_val->init_req(_null_path, _gvn.intcon(0));

4748     return true;
4749   }
4750 
4751   // We only go to the fast case code if we pass a number of guards.  The
4752   // paths which do not pass are accumulated in the slow_region.
4753   RegionNode* slow_region = new RegionNode(1);
4754   record_for_igvn(slow_region);
4755 
4756   // If this is a virtual call, we generate a funny guard.  We pull out
4757   // the vtable entry corresponding to hashCode() from the target object.
4758   // If the target method which we are calling happens to be the native
4759   // Object hashCode() method, we pass the guard.  We do not need this
4760   // guard for non-virtual calls -- the caller is known to be the native
4761   // Object hashCode().
4762   if (is_virtual) {
4763     // After null check, get the object's klass.
4764     Node* obj_klass = load_object_klass(obj);
4765     generate_virtual_guard(obj_klass, slow_region);
4766   }
4767 
4768   // Get the header out of the object, use LoadMarkNode when available
4769   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4770   // The control of the load must be null. Otherwise, the load can move before
4771   // the null check after castPP removal.
4772   Node* no_ctrl = nullptr;
4773   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4774 
4775   if (!UseObjectMonitorTable) {
4776     // Test the header to see if it is safe to read w.r.t. locking.
4777     Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);
4778     Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4779     Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
4780     Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4781     Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4782 
4783     generate_slow_guard(test_monitor, slow_region);
4784   }
4785 
4786   // Get the hash value and check to see that it has been properly assigned.
4787   // We depend on hash_mask being at most 32 bits and avoid the use of
4788   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4789   // vm: see markWord.hpp.
4790   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4791   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4792   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4793   // This hack lets the hash bits live anywhere in the mark object now, as long
4794   // as the shift drops the relevant bits into the low 32 bits.  Note that
4795   // Java spec says that HashCode is an int so there's no point in capturing
4796   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4797   hshifted_header      = ConvX2I(hshifted_header);
4798   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));
4799 
4800   Node *no_hash_val    = _gvn.intcon(markWord::no_hash);
4801   Node *chk_assigned   = _gvn.transform(new CmpINode( hash_val, no_hash_val));
4802   Node *test_assigned  = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
4803 
4804   generate_slow_guard(test_assigned, slow_region);













































































































































4805 
4806   Node* init_mem = reset_memory();
4807   // fill in the rest of the null path:
4808   result_io ->init_req(_null_path, i_o());
4809   result_mem->init_req(_null_path, init_mem);
4810 
4811   result_val->init_req(_fast_path, hash_val);
4812   result_reg->init_req(_fast_path, control());
4813   result_io ->init_req(_fast_path, i_o());
4814   result_mem->init_req(_fast_path, init_mem);
4815 





4816   // Generate code for the slow case.  We make a call to hashCode().

4817   set_control(_gvn.transform(slow_region));
4818   if (!stopped()) {
4819     // No need for PreserveJVMState, because we're using up the present state.
4820     set_all_memory(init_mem);
4821     vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
4822     CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static, false);
4823     Node* slow_result = set_results_for_java_call(slow_call);
4824     // this->control() comes from set_results_for_java_call
4825     result_reg->init_req(_slow_path, control());
4826     result_val->init_req(_slow_path, slow_result);
4827     result_io  ->set_req(_slow_path, i_o());
4828     result_mem ->set_req(_slow_path, reset_memory());
4829   }
4830 
4831   // Return the combined state.
4832   set_i_o(        _gvn.transform(result_io)  );
4833   set_all_memory( _gvn.transform(result_mem));
4834 
4835   set_result(result_reg, result_val);
4836   return true;

5569     _gvn.hash_delete(alloc);
5570     alloc->set_req(TypeFunc::Control, control());
5571     alloc->set_req(TypeFunc::I_O, i_o());
5572     Node *mem = reset_memory();
5573     set_all_memory(mem);
5574     alloc->set_req(TypeFunc::Memory, mem);
5575     set_control(init->proj_out_or_null(TypeFunc::Control));
5576     set_i_o(callprojs.fallthrough_ioproj);
5577 
5578     // Update memory as done in GraphKit::set_output_for_allocation()
5579     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5580     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5581     if (ary_type->isa_aryptr() && length_type != nullptr) {
5582       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5583     }
5584     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5585     int            elemidx  = C->get_alias_index(telemref);
5586     // Need to properly move every memory projection for the Initialize
5587 #ifdef ASSERT
5588     int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
5589     int klass_idx = C->get_alias_index(ary_type->add_offset(oopDesc::klass_offset_in_bytes()));
5590 #endif
5591     auto move_proj = [&](ProjNode* proj) {
5592       int alias_idx = C->get_alias_index(proj->adr_type());
5593       assert(alias_idx == Compile::AliasIdxRaw ||
5594              alias_idx == elemidx ||
5595              alias_idx == mark_idx ||
5596              alias_idx == klass_idx, "should be raw memory or array element type");
5597       set_memory(proj, alias_idx);
5598     };
5599     init->for_each_proj(move_proj, TypeFunc::Memory);
5600 
5601     Node* allocx = _gvn.transform(alloc);
5602     assert(allocx == alloc, "where has the allocation gone?");
5603     assert(dest->is_CheckCastPP(), "not an allocation result?");
5604 
5605     _gvn.hash_delete(dest);
5606     dest->set_req(0, control());
5607     Node* destx = _gvn.transform(dest);
5608     assert(destx == dest, "where has the allocation result gone?");
5609 

4700     // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
4701     // about the method being invoked should be attached to the call site to
4702     // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
4703     slow_call->set_override_symbolic_info(true);
4704   }
4705   set_arguments_for_java_call(slow_call);
4706   set_edges_for_java_call(slow_call);
4707   return slow_call;
4708 }
4709 
4710 
4711 /**
4712  * Build special case code for calls to hashCode on an object. This call may
4713  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4714  * slightly different code.
4715  */
4716 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4717   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4718   assert(!(is_virtual && is_static), "either virtual, special, or static");
4719 
4720   enum { _slow_path = 1, _null_path, _fast_path, _fast_path2, PATH_LIMIT }; // _fast_path2: hash read back out of the object copy (compact headers only)
4721 
4722   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4723   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4724   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4725   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4726   Node* obj = nullptr;
4727   if (!is_static) {
4728     // Check for hashing null object
4729     obj = null_check_receiver();
4730     if (stopped())  return true;        // unconditionally null
4731     result_reg->init_req(_null_path, top());
4732     result_val->init_req(_null_path, top());
4733   } else {
4734     // Do a null check, and return zero if null.
4735     // System.identityHashCode(null) == 0
4736     obj = argument(0);
4737     Node* null_ctl = top();
4738     obj = null_check_oop(obj, &null_ctl);
4739     result_reg->init_req(_null_path, null_ctl);
4740     result_val->init_req(_null_path, _gvn.intcon(0));

4748     return true;
4749   }
4750 
4751   // We only go to the fast case code if we pass a number of guards.  The
4752   // paths which do not pass are accumulated in the slow_region.
4753   RegionNode* slow_region = new RegionNode(1);
4754   record_for_igvn(slow_region);
4755 
4756   // If this is a virtual call, we generate a funny guard.  We pull out
4757   // the vtable entry corresponding to hashCode() from the target object.
4758   // If the target method which we are calling happens to be the native
4759   // Object hashCode() method, we pass the guard.  We do not need this
4760   // guard for non-virtual calls -- the caller is known to be the native
4761   // Object hashCode().
4762   if (is_virtual) {
4763     // After null check, get the object's klass.
4764     Node* obj_klass = load_object_klass(obj);
4765     generate_virtual_guard(obj_klass, slow_region);
4766   }
4767 
4768   if (UseCompactObjectHeaders) {
4769     // Get the header out of the object.
4770     Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4771     // The control of the load must be null. Otherwise, the load can move before
4772     // the null check after castPP removal.
4773     Node* no_ctrl = nullptr;
4774     Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4775 
4776     // Test the header to see if the object is in hashed or copied state.
4777     Node* hashctrl_mask  = _gvn.MakeConX(markWord::hashctrl_mask_in_place);
4778     Node* masked_header  = _gvn.transform(new AndXNode(header, hashctrl_mask));
4779 
4780     // Take slow-path when the object has not been hashed.
4781     Node* not_hashed_val = _gvn.MakeConX(0);
4782     Node* chk_hashed     = _gvn.transform(new CmpXNode(masked_header, not_hashed_val));
4783     Node* test_hashed    = _gvn.transform(new BoolNode(chk_hashed, BoolTest::eq));
4784 
4785     generate_slow_guard(test_hashed, slow_region);
4786 
4787     // Test whether the object is hashed or hashed&copied.
4788     Node* hashed_copied = _gvn.MakeConX(markWord::hashctrl_expanded_mask_in_place | markWord::hashctrl_hashed_mask_in_place);
4789     Node* chk_copied    = _gvn.transform(new CmpXNode(masked_header, hashed_copied));
4790     // If true, then object has been hashed&copied, otherwise it's only hashed.
4791     Node* test_copied   = _gvn.transform(new BoolNode(chk_copied, BoolTest::eq));
4792     IfNode* if_copied   = create_and_map_if(control(), test_copied, PROB_FAIR, COUNT_UNKNOWN);
4793     Node* if_true = _gvn.transform(new IfTrueNode(if_copied));
4794     Node* if_false = _gvn.transform(new IfFalseNode(if_copied));
4795 
4796     // Hashed&Copied path: read hash-code out of the object.
4797     set_control(if_true);
4798     // result_val->del_req(_fast_path2);
4799     // result_reg->del_req(_fast_path2);
4800     // result_io->del_req(_fast_path2);
4801     // result_mem->del_req(_fast_path2);
       // NOTE(review): the four commented-out del_req lines above are leftover
       // experiments — remove before integration.
4802 
4803     Node* obj_klass = load_object_klass(obj);
4804     Node* hash_addr; // NOTE(review): only assigned in the exact-klass branch below; the runtime branch declares its own shadowing 'hash_addr' (line 4839) — confirm intent or drop one.
4805     const TypeKlassPtr* klass_t = _gvn.type(obj_klass)->isa_klassptr();
4806     bool load_offset_runtime = true;
4807 
4808     if (klass_t != nullptr) {
4809       if (klass_t->klass_is_exact()  && klass_t->isa_instklassptr()) {
4810         ciInstanceKlass* ciKlass = reinterpret_cast<ciInstanceKlass*>(klass_t->is_instklassptr()->exact_klass());
          // NOTE(review): prefer the checked ci cast (exact_klass()->as_instance_klass())
          // over reinterpret_cast here.
4811         if (!ciKlass->is_mirror_instance_klass() && !ciKlass->is_reference_instance_klass()) {
4812           // We know the InstanceKlass, load hash_offset from there at compile-time.
4813           int hash_offset = ciKlass->hash_offset_in_bytes();
4814           hash_addr = basic_plus_adr(obj, hash_offset);
4815           Node* loaded_hash = make_load(control(), hash_addr, TypeInt::INT, T_INT, MemNode::unordered);
4816           result_val->init_req(_fast_path2, loaded_hash);
4817           result_reg->init_req(_fast_path2, control());
4818           load_offset_runtime = false;
4819         }
4820       }
4821     }
4822 
4823     //tty->print_cr("Load hash-offset at runtime: %s", BOOL_TO_STR(load_offset_runtime));
       // NOTE(review): debug print leftover above — remove.
4824 
4825     if (load_offset_runtime) {
4826       // We don't know if it is an array or an exact type, figure it out at run-time.
4827       // If not an ordinary instance, then we need to take slow-path.
4828       Node* kind_addr = basic_plus_adr(top(), obj_klass, Klass::kind_offset_in_bytes());
4829       Node* kind = make_load(control(), kind_addr, TypeInt::INT, T_INT, MemNode::unordered);
4830       Node* instance_val = _gvn.intcon(Klass::InstanceKlassKind);
4831       Node* chk_inst     = _gvn.transform(new CmpINode(kind, instance_val));
4832       Node* test_inst    = _gvn.transform(new BoolNode(chk_inst, BoolTest::ne));
4833       generate_slow_guard(test_inst, slow_region);
4834 
4835       // Otherwise it's an instance and we can read the hash_offset from the InstanceKlass.
4836       Node* hash_offset_addr = basic_plus_adr(top(), obj_klass, InstanceKlass::hash_offset_offset_in_bytes());
4837       Node* hash_offset = make_load(control(), hash_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
4838       // hash_offset->dump();
          // NOTE(review): debug dump leftover above — remove.
4839       Node* hash_addr = basic_plus_adr(obj, ConvI2X(hash_offset));
          // NOTE(review): this local shadows the 'hash_addr' declared at line 4804.
4840       Compile::current()->set_has_unsafe_access(true);
          // NOTE(review): presumably flagged as unsafe because the load offset is
          // only known at runtime — confirm this is the intended reason.
4841       Node* loaded_hash = make_load(control(), hash_addr, TypeInt::INT, T_INT, MemNode::unordered);
4842       result_val->init_req(_fast_path2, loaded_hash);
4843       result_reg->init_req(_fast_path2, control());
4844     }
4845 
4846     // Hashed-only path: recompute hash-code from object address.
4847     set_control(if_false);
4848     if (hashCode == 6) {
       // NOTE(review): dispatches on the global 'hashCode' flag. If the flag is
       // neither 6 nor 2, result_val's _fast_path input is never initialized while
       // result_reg's _fast_path input is (line 4952) — confirm all supported
       // hashCode modes reach one of these branches.
4849       // Our constants.
4850       Node* M = _gvn.intcon(0x337954D5);
4851       Node* A = _gvn.intcon(0xAAAAAAAA);
4852       // Split object address into lo and hi 32 bits.
4853       Node* obj_addr = _gvn.transform(new CastP2XNode(nullptr, obj));
4854       Node* x = _gvn.transform(new ConvL2INode(obj_addr));
4855       Node* upper_addr = _gvn.transform(new URShiftLNode(obj_addr, _gvn.intcon(32)));
4856       Node* y = _gvn.transform(new ConvL2INode(upper_addr));
4857 
4858       Node* H0 = _gvn.transform(new XorINode(x, y));
4859       Node* L0 = _gvn.transform(new XorINode(x, A));
4860 
4861       // Full multiplication of two 32 bit values L0 and M into a hi/lo result in two 32 bit values V0 and U0.
4862       Node* L0_64 = _gvn.transform(new ConvI2LNode(L0));
4863       L0_64 = _gvn.transform(new AndLNode(L0_64, _gvn.longcon(0xFFFFFFFF)));
4864       Node* M_64 = _gvn.transform(new ConvI2LNode(M));
4865       // M_64 = _gvn.transform(new AndLNode(M_64, _gvn.longcon(0xFFFFFFFF)));
          // NOTE(review): commented-out masking of M_64 above — either delete it or
          // explain why sign-extension of M is acceptable in the products below.
4866       Node* prod64 = _gvn.transform(new MulLNode(L0_64, M_64));
4867       Node* V0 = _gvn.transform(new ConvL2INode(prod64));
4868       Node* prod_upper = _gvn.transform(new URShiftLNode(prod64, _gvn.intcon(32)));
4869       Node* U0 = _gvn.transform(new ConvL2INode(prod_upper));
4870 
4871       Node* Q0 = _gvn.transform(new MulINode(H0, M));
4872       Node* L1 = _gvn.transform(new XorINode(Q0, U0));
4873 
4874       // Full multiplication of two 32 bit values L1 and M into a hi/lo result in two 32 bit values V1 and U1.
4875       Node* L1_64 = _gvn.transform(new ConvI2LNode(L1));
4876       L1_64 = _gvn.transform(new AndLNode(L1_64, _gvn.longcon(0xFFFFFFFF)));
4877       prod64 = _gvn.transform(new MulLNode(L1_64, M_64));
4878       Node* V1 = _gvn.transform(new ConvL2INode(prod64));
4879       prod_upper = _gvn.transform(new URShiftLNode(prod64, _gvn.intcon(32)));
4880       Node* U1 = _gvn.transform(new ConvL2INode(prod_upper));
4881 
4882       Node* P1 = _gvn.transform(new XorINode(V0, M));
4883 
4884       // Right rotate P1 by distance L1.
4885       Node* distance = _gvn.transform(new AndINode(L1, _gvn.intcon(32 - 1)));
4886       Node* inverse_distance = _gvn.transform(new SubINode(_gvn.intcon(32), distance));
4887       Node* ror_part1 = _gvn.transform(new URShiftINode(P1, distance));
4888       Node* ror_part2 = _gvn.transform(new LShiftINode(P1, inverse_distance));
          // NOTE(review): when distance == 0, inverse_distance == 32; an int left
          // shift by 32 is masked to 0 on the JVM's shift semantics, which still
          // yields the correct rotate — worth a comment or a RotateRightNode if
          // available on all platforms; confirm.
4889       Node* Q1 = _gvn.transform(new OrINode(ror_part1, ror_part2));
4890 
4891       Node* L2 = _gvn.transform(new XorINode(Q1, U1));
4892       Node* hash = _gvn.transform(new XorINode(V1, L2));
4893       Node* hash_truncated = _gvn.transform(new AndINode(hash, _gvn.intcon(markWord::hash_mask)));
4894 
4895       result_val->init_req(_fast_path, hash_truncated);
4896     } else if (hashCode == 2) {
4897       result_val->init_req(_fast_path, _gvn.intcon(1)); // hashCode==2: constant 1 for all objects (degenerate test mode)
4898     }
4899   } else {
4900     // Get the header out of the object, use LoadMarkNode when available
4901     Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4902     // The control of the load must be null. Otherwise, the load can move before
4903     // the null check after castPP removal.
4904     Node* no_ctrl = nullptr;
4905     Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4906 
4907     if (!UseObjectMonitorTable) {
4908       // Test the header to see if it is safe to read w.r.t. locking.
4909       Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);
4910       Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4911       Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
4912       Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4913       Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4914 
4915       generate_slow_guard(test_monitor, slow_region);
4916     }
4917 
4918     // Get the hash value and check to see that it has been properly assigned.
4919     // We depend on hash_mask being at most 32 bits and avoid the use of
4920     // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4921     // vm: see markWord.hpp.
4922     Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4923     Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4924     Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4925     // This hack lets the hash bits live anywhere in the mark object now, as long
4926     // as the shift drops the relevant bits into the low 32 bits.  Note that
4927     // Java spec says that HashCode is an int so there's no point in capturing
4928     // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4929     hshifted_header      = ConvX2I(hshifted_header);
4930     Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));
4931 
4932     Node *no_hash_val    = _gvn.intcon(markWord::no_hash);
4933     Node *chk_assigned   = _gvn.transform(new CmpINode( hash_val, no_hash_val));
4934     Node *test_assigned  = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
4935 
4936     generate_slow_guard(test_assigned, slow_region);
4937 
4938     result_val->init_req(_fast_path, hash_val);
4939 
4940     // _fast_path2 is not used here.
4941     result_val->del_req(_fast_path2);
4942     result_reg->del_req(_fast_path2);
4943     result_io->del_req(_fast_path2);
4944     result_mem->del_req(_fast_path2);
4945   }
4946 
4947   Node* init_mem = reset_memory();
4948   // fill in the rest of the null path:
4949   result_io ->init_req(_null_path, i_o());
4950   result_mem->init_req(_null_path, init_mem);
4951 

4952   result_reg->init_req(_fast_path, control());
4953   result_io ->init_req(_fast_path, i_o());
4954   result_mem->init_req(_fast_path, init_mem);
4955 
4956   if (UseCompactObjectHeaders) {
     // Control/value for _fast_path2 were wired inside the compact-headers branch
     // above; only i/o and memory remain to be filled in here.
4957     result_io->init_req(_fast_path2, i_o());
4958     result_mem->init_req(_fast_path2, init_mem);
4959   }
4960 
4961   // Generate code for the slow case.  We make a call to hashCode().
4962   assert(slow_region != nullptr, "must have slow_region");
4963   set_control(_gvn.transform(slow_region));
4964   if (!stopped()) {
4965     // No need for PreserveJVMState, because we're using up the present state.
4966     set_all_memory(init_mem);
4967     vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
4968     CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static, false);
4969     Node* slow_result = set_results_for_java_call(slow_call);
4970     // this->control() comes from set_results_for_java_call
4971     result_reg->init_req(_slow_path, control());
4972     result_val->init_req(_slow_path, slow_result);
4973     result_io  ->set_req(_slow_path, i_o());
4974     result_mem ->set_req(_slow_path, reset_memory());
4975   }
4976 
4977   // Return the combined state.
4978   set_i_o(        _gvn.transform(result_io)  );
4979   set_all_memory( _gvn.transform(result_mem));
4980 
4981   set_result(result_reg, result_val);
4982   return true;

5715     _gvn.hash_delete(alloc);
5716     alloc->set_req(TypeFunc::Control, control());
5717     alloc->set_req(TypeFunc::I_O, i_o());
5718     Node *mem = reset_memory();
5719     set_all_memory(mem);
5720     alloc->set_req(TypeFunc::Memory, mem);
5721     set_control(init->proj_out_or_null(TypeFunc::Control));
5722     set_i_o(callprojs.fallthrough_ioproj);
5723 
5724     // Update memory as done in GraphKit::set_output_for_allocation()
5725     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5726     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5727     if (ary_type->isa_aryptr() && length_type != nullptr) {
5728       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5729     }
5730     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5731     int            elemidx  = C->get_alias_index(telemref);
5732     // Need to properly move every memory projection for the Initialize
5733 #ifdef ASSERT
5734     int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
5735     int klass_idx = C->get_alias_index(ary_type->add_offset(Type::klass_offset()));
5736 #endif
5737     auto move_proj = [&](ProjNode* proj) {
5738       int alias_idx = C->get_alias_index(proj->adr_type());
5739       assert(alias_idx == Compile::AliasIdxRaw ||
5740              alias_idx == elemidx ||
5741              alias_idx == mark_idx ||
5742              alias_idx == klass_idx, "should be raw memory or array element type");
5743       set_memory(proj, alias_idx);
5744     };
5745     init->for_each_proj(move_proj, TypeFunc::Memory);
5746 
5747     Node* allocx = _gvn.transform(alloc);
5748     assert(allocx == alloc, "where has the allocation gone?");
5749     assert(dest->is_CheckCastPP(), "not an allocation result?");
5750 
5751     _gvn.hash_delete(dest);
5752     dest->set_req(0, control());
5753     Node* destx = _gvn.transform(dest);
5754     assert(destx == dest, "where has the allocation result gone?");
5755 
< prev index next >