4643 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
4644 // about the method being invoked should be attached to the call site to
4645 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
4646 slow_call->set_override_symbolic_info(true);
4647 }
4648 set_arguments_for_java_call(slow_call);
4649 set_edges_for_java_call(slow_call);
4650 return slow_call;
4651 }
4652
4653
4654 /**
4655  * Build special case code for calls to hashCode on an object. This call may
4656  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4657  * slightly different code.
 *
 * Fast path: read the identity hash that is already installed in the object's
 * mark word. Slow path: make a real call to Object.hashCode() (or, when
 * is_static, to System.identityHashCode()). Returns true when the intrinsic
 * was successfully expanded.
4658  */
4659 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4660 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4661 assert(!(is_virtual && is_static), "either virtual, special, or static");
4662
 // Merge-point inputs: slow call, mark-word read, and null receiver.
4663 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4664
 // Region plus phis merging the result value, i/o and memory state of all paths.
4665 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4666 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4667 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4668 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4669 Node* obj = nullptr;
4670 if (!is_static) {
4671 // Check for hashing null object
 // Virtual/special call: a null receiver throws, so the null path is dead (top).
4672 obj = null_check_receiver();
4673 if (stopped()) return true; // unconditionally null
4674 result_reg->init_req(_null_path, top());
4675 result_val->init_req(_null_path, top());
4676 } else {
4677 // Do a null check, and return zero if null.
4678 // System.identityHashCode(null) == 0
4679 obj = argument(0);
4680 Node* null_ctl = top();
4681 obj = null_check_oop(obj, &null_ctl);
4682 result_reg->init_req(_null_path, null_ctl);
4683 result_val->init_req(_null_path, _gvn.intcon(0));
 // NOTE(review): original source lines 4684-4690 are elided in this capture.
 // Judging from the surviving '}' at 4692, this 'return true;' presumably sits
 // inside an "unconditionally null -> return right away" early exit -- confirm
 // against the complete file before relying on this reading.
4691 return true;
4692 }
4693
4694 // We only go to the fast case code if we pass a number of guards. The
4695 // paths which do not pass are accumulated in the slow_region.
4696 RegionNode* slow_region = new RegionNode(1);
4697 record_for_igvn(slow_region);
4698
4699 // If this is a virtual call, we generate a funny guard. We pull out
4700 // the vtable entry corresponding to hashCode() from the target object.
4701 // If the target method which we are calling happens to be the native
4702 // Object hashCode() method, we pass the guard. We do not need this
4703 // guard for non-virtual calls -- the caller is known to be the native
4704 // Object hashCode().
4705 if (is_virtual) {
4706 // After null check, get the object's klass.
4707 Node* obj_klass = load_object_klass(obj);
4708 generate_virtual_guard(obj_klass, slow_region);
4709 }
4710
4711 // Get the header out of the object, use LoadMarkNode when available
 // TypeX_X is the word-sized integer type, so this loads the whole mark word.
4712 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4713 // The control of the load must be null. Otherwise, the load can move before
4714 // the null check after castPP removal.
4715 Node* no_ctrl = nullptr;
4716 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4717
4718 if (!UseObjectMonitorTable) {
4719 // Test the header to see if it is safe to read w.r.t. locking.
 // When the mark word may be displaced by locking, the hash bits are only
 // valid in certain lock states; otherwise fall through to the slow call.
4720 Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
4721 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4722 if (LockingMode == LM_LIGHTWEIGHT) {
 // Lightweight locking: only an inflated monitor hides the hash bits.
4723 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
4724 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4725 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4726
4727 generate_slow_guard(test_monitor, slow_region);
4728 } else {
 // Legacy locking: any state other than "unlocked" goes to the slow path.
4729 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
4730 Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
4731 Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
4732
4733 generate_slow_guard(test_not_unlocked, slow_region);
4734 }
4735 }
4736
4737 // Get the hash value and check to see that it has been properly assigned.
4738 // We depend on hash_mask being at most 32 bits and avoid the use of
4739 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4740 // vm: see markWord.hpp.
4741 Node *hash_mask = _gvn.intcon(markWord::hash_mask);
4742 Node *hash_shift = _gvn.intcon(markWord::hash_shift);
4743 Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4744 // This hack lets the hash bits live anywhere in the mark object now, as long
4745 // as the shift drops the relevant bits into the low 32 bits. Note that
4746 // Java spec says that HashCode is an int so there's no point in capturing
4747 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4748 hshifted_header = ConvX2I(hshifted_header);
4749 Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
4750
 // markWord::no_hash means the hash has never been computed for this object;
 // in that case only the slow call can compute (and install) it.
4751 Node *no_hash_val = _gvn.intcon(markWord::no_hash);
4752 Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
4753 Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
4754
4755 generate_slow_guard(test_assigned, slow_region);
4756
 // Capture the current memory state once and reuse it for the non-call paths,
 // which do not modify memory.
4757 Node* init_mem = reset_memory();
4758 // fill in the rest of the null path:
4759 result_io ->init_req(_null_path, i_o());
4760 result_mem->init_req(_null_path, init_mem);
4761
4762 result_val->init_req(_fast_path, hash_val);
4763 result_reg->init_req(_fast_path, control());
4764 result_io ->init_req(_fast_path, i_o());
4765 result_mem->init_req(_fast_path, init_mem);
4766
4767 // Generate code for the slow case. We make a call to hashCode().
4768 set_control(_gvn.transform(slow_region));
4769 if (!stopped()) {
4770 // No need for PreserveJVMState, because we're using up the present state.
4771 set_all_memory(init_mem);
4772 vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
4773 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static, false);
4774 Node* slow_result = set_results_for_java_call(slow_call);
4775 // this->control() comes from set_results_for_java_call
4776 result_reg->init_req(_slow_path, control());
4777 result_val->init_req(_slow_path, slow_result);
4778 result_io ->set_req(_slow_path, i_o());
4779 result_mem ->set_req(_slow_path, reset_memory());
4780 }
4781
4782 // Return the combined state.
4783 set_i_o( _gvn.transform(result_io) );
4784 set_all_memory( _gvn.transform(result_mem));
4785
 // NOTE(review): the function's closing '}' (original line 4788) is cut off in
 // this capture; the body below is the last visible content.
4786 set_result(result_reg, result_val);
4787 return true;
|
4643 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
4644 // about the method being invoked should be attached to the call site to
4645 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
4646 slow_call->set_override_symbolic_info(true);
4647 }
4648 set_arguments_for_java_call(slow_call);
4649 set_edges_for_java_call(slow_call);
4650 return slow_call;
4651 }
4652
4653
4654 /**
4655  * Build special case code for calls to hashCode on an object. This call may
4656  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4657  * slightly different code.
 *
 * This variant additionally supports UseCompactObjectHeaders, where the mark
 * word carries "hashctrl" state bits instead of the hash value itself:
 *   - not hashed           -> slow call (computes and installs the hash)
 *   - hashed & expanded    -> load the stored hash from a field in the object
 *                             (_fast_path2)
 *   - hashed only          -> recompute the hash from the object's address
 *                             (_fast_path)
 * Returns true when the intrinsic was successfully expanded.
4658  */
4659 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4660 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4661 assert(!(is_virtual && is_static), "either virtual, special, or static");
4662
 // Merge-point inputs; _fast_path2 (load-from-object) exists only for the
 // compact-headers shape and is deleted again in the legacy branch below.
4663 enum { _slow_path = 1, _null_path, _fast_path, _fast_path2, PATH_LIMIT };
4664
 // Region plus phis merging result value, i/o and memory state of all paths.
4665 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4666 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4667 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4668 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4669 Node* obj = nullptr;
4670 if (!is_static) {
4671 // Check for hashing null object
 // Virtual/special call: a null receiver throws, so the null path is dead (top).
4672 obj = null_check_receiver();
4673 if (stopped()) return true; // unconditionally null
4674 result_reg->init_req(_null_path, top());
4675 result_val->init_req(_null_path, top());
4676 } else {
4677 // Do a null check, and return zero if null.
4678 // System.identityHashCode(null) == 0
4679 obj = argument(0);
4680 Node* null_ctl = top();
4681 obj = null_check_oop(obj, &null_ctl);
4682 result_reg->init_req(_null_path, null_ctl);
4683 result_val->init_req(_null_path, _gvn.intcon(0));
 // NOTE(review): original source lines 4684-4690 are elided in this capture.
 // Judging from the surviving '}' at 4692, this 'return true;' presumably sits
 // inside an "unconditionally null -> return right away" early exit -- confirm
 // against the complete file before relying on this reading.
4691 return true;
4692 }
4693
4694 // We only go to the fast case code if we pass a number of guards. The
4695 // paths which do not pass are accumulated in the slow_region.
4696 RegionNode* slow_region = new RegionNode(1);
4697 record_for_igvn(slow_region);
4698
4699 // If this is a virtual call, we generate a funny guard. We pull out
4700 // the vtable entry corresponding to hashCode() from the target object.
4701 // If the target method which we are calling happens to be the native
4702 // Object hashCode() method, we pass the guard. We do not need this
4703 // guard for non-virtual calls -- the caller is known to be the native
4704 // Object hashCode().
4705 if (is_virtual) {
4706 // After null check, get the object's klass.
4707 Node* obj_klass = load_object_klass(obj);
4708 generate_virtual_guard(obj_klass, slow_region);
4709 }
4710
4711 if (UseCompactObjectHeaders) {
4712 // Get the header out of the object.
4713 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4714 // The control of the load must be null. Otherwise, the load can move before
4715 // the null check after castPP removal.
4716 Node* no_ctrl = nullptr;
4717 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4718
4719 // Test the header to see if the object is in hashed or copied state.
4720 Node* hashctrl_mask = _gvn.MakeConX(markWord::hashctrl_mask_in_place);
4721 Node* masked_header = _gvn.transform(new AndXNode(header, hashctrl_mask));
4722
4723 // Take slow-path when the object has not been hashed.
 // The slow call computes the identity hash and sets the hashctrl bits.
4724 Node* not_hashed_val = _gvn.MakeConX(0);
4725 Node* chk_hashed = _gvn.transform(new CmpXNode(masked_header, not_hashed_val));
4726 Node* test_hashed = _gvn.transform(new BoolNode(chk_hashed, BoolTest::eq));
4727
4728 generate_slow_guard(test_hashed, slow_region);
4729
4730 // Test whether the object is hashed or hashed&copied.
4731 Node* hashed_copied = _gvn.MakeConX(markWord::hashctrl_expanded_mask_in_place | markWord::hashctrl_hashed_mask_in_place);
4732 Node* chk_copied = _gvn.transform(new CmpXNode(masked_header, hashed_copied));
4733 // If true, then object has been hashed&copied, otherwise it's only hashed.
4734 Node* test_copied = _gvn.transform(new BoolNode(chk_copied, BoolTest::eq));
4735 IfNode* if_copied = create_and_map_if(control(), test_copied, PROB_FAIR, COUNT_UNKNOWN);
4736 Node* if_true = _gvn.transform(new IfTrueNode(if_copied));
4737 Node* if_false = _gvn.transform(new IfFalseNode(if_copied));
4738
4739 // Hashed&Copied path: read hash-code out of the object.
4740 set_control(if_true);
 // NOTE(review): commented-out dead code below (del_req calls) -- remove
 // before integration, or explain why it is kept.
4741 // result_val->del_req(_fast_path2);
4742 // result_reg->del_req(_fast_path2);
4743 // result_io->del_req(_fast_path2);
4744 // result_mem->del_req(_fast_path2);
4745
4746 Node* obj_klass = load_object_klass(obj);
 // NOTE(review): 'hash_addr' declared here is only assigned on the
 // compile-time-offset path; the runtime path at 4782 shadows it with a new
 // declaration, leaving this one unused there. Consider removing the shadow.
4747 Node* hash_addr;
4748 const TypeKlassPtr* klass_t = _gvn.type(obj_klass)->isa_klassptr();
4749 bool load_offset_runtime = true;
4750
4751 if (klass_t != nullptr) {
4752 if (klass_t->klass_is_exact() && klass_t->isa_instklassptr()) {
 // NOTE(review): unchecked reinterpret_cast -- the exact klass of an
 // instklassptr should be an instance klass, but a checked accessor
 // (e.g. as_instance_klass()) would be safer/clearer. Verify.
4753 ciInstanceKlass* ciKlass = reinterpret_cast<ciInstanceKlass*>(klass_t->is_instklassptr()->exact_klass());
 // Mirror/reference instances get special treatment elsewhere -- presumably
 // their hash field location differs; confirm against the runtime side.
4754 if (!ciKlass->is_mirror_instance_klass() && !ciKlass->is_reference_instance_klass()) {
4755 // We know the InstanceKlass, load hash_offset from there at compile-time.
4756 int hash_offset = ciKlass->hash_offset_in_bytes();
4757 hash_addr = basic_plus_adr(obj, hash_offset);
4758 Node* loaded_hash = make_load(control(), hash_addr, TypeInt::INT, T_INT, MemNode::unordered);
4759 result_val->init_req(_fast_path2, loaded_hash);
4760 result_reg->init_req(_fast_path2, control());
4761 load_offset_runtime = false;
4762 }
4763 }
4764 }
4765
 // NOTE(review): leftover debug print -- delete before integration.
4766 //tty->print_cr("Load hash-offset at runtime: %s", BOOL_TO_STR(load_offset_runtime));
4767
4768 if (load_offset_runtime) {
4769 // We don't know if it is an array or an exact type, figure it out at run-time.
4770 // If not an ordinary instance, then we need to take slow-path.
4771 Node* kind_addr = basic_plus_adr(obj_klass, Klass::kind_offset_in_bytes());
4772 Node* kind = make_load(control(), kind_addr, TypeInt::INT, T_INT, MemNode::unordered);
4773 Node* instance_val = _gvn.intcon(Klass::InstanceKlassKind);
4774 Node* chk_inst = _gvn.transform(new CmpINode(kind, instance_val));
4775 Node* test_inst = _gvn.transform(new BoolNode(chk_inst, BoolTest::ne));
4776 generate_slow_guard(test_inst, slow_region);
4777
4778 // Otherwise it's an instance and we can read the hash_offset from the InstanceKlass.
4779 Node* hash_offset_addr = basic_plus_adr(obj_klass, InstanceKlass::hash_offset_offset_in_bytes());
4780 Node* hash_offset = make_load(control(), hash_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
 // NOTE(review): leftover debug dump -- delete before integration.
4781 // hash_offset->dump();
 // NOTE(review): this declaration shadows 'hash_addr' from 4747 (see above).
4782 Node* hash_addr = basic_plus_adr(obj, ConvI2X(hash_offset));
 // The offset is not a compile-time constant, so the load's address type is
 // imprecise; mark the compile as having unsafe-style access.
4783 Compile::current()->set_has_unsafe_access(true);
4784 Node* loaded_hash = make_load(control(), hash_addr, TypeInt::INT, T_INT, MemNode::unordered);
4785 result_val->init_req(_fast_path2, loaded_hash);
4786 result_reg->init_req(_fast_path2, control());
4787 }
4788
4789 // Hashed-only path: recompute hash-code from object address.
 // NOTE(review): this mixing function (constants, mul/xor/rotate schedule)
 // must produce bit-identical results to the VM's runtime address-based hash
 // computation for the same object address -- confirm both sides stay in sync.
4790 set_control(if_false);
4791 // Our constants.
4792 Node* M = _gvn.intcon(0x337954D5);
4793 Node* A = _gvn.intcon(0xAAAAAAAA);
4794 // Split object address into lo and hi 32 bits.
4795 Node* obj_addr = _gvn.transform(new CastP2XNode(nullptr, obj));
4796 Node* x = _gvn.transform(new ConvL2INode(obj_addr));
4797 Node* upper_addr = _gvn.transform(new URShiftLNode(obj_addr, _gvn.intcon(32)));
4798 Node* y = _gvn.transform(new ConvL2INode(upper_addr));
4799
4800 Node* H0 = _gvn.transform(new XorINode(x, y));
4801 Node* L0 = _gvn.transform(new XorINode(x, A));
4802
4803 // Full multiplication of two 32 bit values L0 and M into a hi/lo result in two 32 bit values V0 and U0.
 // The AndL zero-extends L0 (ConvI2L sign-extends) so the 64-bit product
 // matches an unsigned 32x32->64 multiply.
4804 Node* L0_64 = _gvn.transform(new ConvI2LNode(L0));
4805 L0_64 = _gvn.transform(new AndLNode(L0_64, _gvn.longcon(0xFFFFFFFF)));
4806 Node* M_64 = _gvn.transform(new ConvI2LNode(M));
 // NOTE(review): commented-out masking of M_64 -- M's top bit is clear
 // (0x337954D5), so sign-extension is harmless, but remove or explain this line.
4807 // M_64 = _gvn.transform(new AndLNode(M_64, _gvn.longcon(0xFFFFFFFF)));
4808 Node* prod64 = _gvn.transform(new MulLNode(L0_64, M_64));
4809 Node* V0 = _gvn.transform(new ConvL2INode(prod64));
4810 Node* prod_upper = _gvn.transform(new URShiftLNode(prod64, _gvn.intcon(32)));
4811 Node* U0 = _gvn.transform(new ConvL2INode(prod_upper));
4812
4813 Node* Q0 = _gvn.transform(new MulINode(H0, M));
4814 Node* L1 = _gvn.transform(new XorINode(Q0, U0));
4815
4816 // Full multiplication of two 32 bit values L1 and M into a hi/lo result in two 32 bit values V1 and U1.
4817 Node* L1_64 = _gvn.transform(new ConvI2LNode(L1));
4818 L1_64 = _gvn.transform(new AndLNode(L1_64, _gvn.longcon(0xFFFFFFFF)));
4819 prod64 = _gvn.transform(new MulLNode(L1_64, M_64));
4820 Node* V1 = _gvn.transform(new ConvL2INode(prod64));
4821 prod_upper = _gvn.transform(new URShiftLNode(prod64, _gvn.intcon(32)));
4822 Node* U1 = _gvn.transform(new ConvL2INode(prod_upper));
4823
4824 Node* P1 = _gvn.transform(new XorINode(V0, M));
4825
4826 // Right rotate P1 by distance L1.
 // Expressed as (P1 >>> d) | (P1 << (32 - d)); d is masked to 0..31 first.
4827 Node* distance = _gvn.transform(new AndINode(L1, _gvn.intcon(32 - 1)));
4828 Node* inverse_distance = _gvn.transform(new SubINode(_gvn.intcon(32), distance));
4829 Node* ror_part1 = _gvn.transform(new URShiftINode(P1, distance));
4830 Node* ror_part2 = _gvn.transform(new LShiftINode(P1, inverse_distance));
4831 Node* Q1 = _gvn.transform(new OrINode(ror_part1, ror_part2));
4832
4833 Node* L2 = _gvn.transform(new XorINode(Q1, U1));
4834 Node* hash = _gvn.transform(new XorINode(V1, L2));
 // Truncate to the legal identity-hash range, as for the mark-word hash.
4835 Node* hash_truncated = _gvn.transform(new AndINode(hash, _gvn.intcon(markWord::hash_mask)));
4836
4837 // TODO: We could generate a fast case here under the following conditions:
4838 // - The hashctrl is set to hash_is_copied (see markWord::hash_is_copied())
4839 // - The type of the object is known
4840 // Then we can load the identity hashcode from the int field at Klass::hash_offset_in_bytes() of the object.
4841 result_val->init_req(_fast_path, hash_truncated);
4842 } else {
4843 // Get the header out of the object, use LoadMarkNode when available
4844 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4845 // The control of the load must be null. Otherwise, the load can move before
4846 // the null check after castPP removal.
4847 Node* no_ctrl = nullptr;
4848 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4849
4850 if (!UseObjectMonitorTable) {
4851 // Test the header to see if it is safe to read w.r.t. locking.
4852 Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
4853 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4854 if (LockingMode == LM_LIGHTWEIGHT) {
 // Lightweight locking: only an inflated monitor hides the hash bits.
4855 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
4856 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4857 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4858
4859 generate_slow_guard(test_monitor, slow_region);
4860 } else {
 // Legacy locking: any state other than "unlocked" goes to the slow path.
4861 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
4862 Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
4863 Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
4864
4865 generate_slow_guard(test_not_unlocked, slow_region);
4866 }
4867 }
4868
4869 // Get the hash value and check to see that it has been properly assigned.
4870 // We depend on hash_mask being at most 32 bits and avoid the use of
4871 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4872 // vm: see markWord.hpp.
4873 Node *hash_mask = _gvn.intcon(markWord::hash_mask);
4874 Node *hash_shift = _gvn.intcon(markWord::hash_shift);
4875 Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4876 // This hack lets the hash bits live anywhere in the mark object now, as long
4877 // as the shift drops the relevant bits into the low 32 bits. Note that
4878 // Java spec says that HashCode is an int so there's no point in capturing
4879 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4880 hshifted_header = ConvX2I(hshifted_header);
4881 Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
4882
 // markWord::no_hash means the hash was never computed; slow path installs it.
4883 Node *no_hash_val = _gvn.intcon(markWord::no_hash);
4884 Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
4885 Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
4886
4887 generate_slow_guard(test_assigned, slow_region);
4888
4889 result_val->init_req(_fast_path, hash_val);
4890
4891 // _fast_path2 is not used here.
 // The legacy (non-compact-headers) shape has no load-from-object path, so
 // the extra region/phi input is removed again.
4892 result_val->del_req(_fast_path2);
4893 result_reg->del_req(_fast_path2);
4894 result_io->del_req(_fast_path2);
4895 result_mem->del_req(_fast_path2);
4896 }
4897
 // Capture the current memory state once and reuse it for the non-call paths,
 // which do not modify memory.
4898 Node* init_mem = reset_memory();
4899 // fill in the rest of the null path:
4900 result_io ->init_req(_null_path, i_o());
4901 result_mem->init_req(_null_path, init_mem);
4902
4903 result_reg->init_req(_fast_path, control());
4904 result_io ->init_req(_fast_path, i_o());
4905 result_mem->init_req(_fast_path, init_mem);
4906
4907 if (UseCompactObjectHeaders) {
 // Control/value for _fast_path2 were wired inside the branch above; only
 // i/o and memory remain to be filled in here.
4908 result_io->init_req(_fast_path2, i_o());
4909 result_mem->init_req(_fast_path2, init_mem);
4910 }
4911
4912 // Generate code for the slow case. We make a call to hashCode().
 // NOTE(review): this assert is trivially true -- slow_region is assigned via
 // 'new' at 4696 and never reassigned; consider dropping it.
4913 assert(slow_region != nullptr, "must have slow_region");
4914 set_control(_gvn.transform(slow_region));
4915 if (!stopped()) {
4916 // No need for PreserveJVMState, because we're using up the present state.
4917 set_all_memory(init_mem);
4918 vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
4919 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static, false);
4920 Node* slow_result = set_results_for_java_call(slow_call);
4921 // this->control() comes from set_results_for_java_call
4922 result_reg->init_req(_slow_path, control());
4923 result_val->init_req(_slow_path, slow_result);
4924 result_io ->set_req(_slow_path, i_o());
4925 result_mem ->set_req(_slow_path, reset_memory());
4926 }
4927
4928 // Return the combined state.
4929 set_i_o( _gvn.transform(result_io) );
4930 set_all_memory( _gvn.transform(result_mem));
4931
 // NOTE(review): the function's closing '}' is cut off in this capture; the
 // body below is the last visible content.
4932 set_result(result_reg, result_val);
4933 return true;
|