18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "ci/ciCallSite.hpp"
28 #include "ci/ciObjArray.hpp"
29 #include "ci/ciMemberName.hpp"
30 #include "ci/ciMethodHandle.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/callGenerator.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/parse.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/runtime.hpp"
41 #include "opto/subnode.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "ci/ciNativeEntryPoint.hpp"
44 #include "utilities/debug.hpp"
45
46 // Utility function.
47 const TypeFunc* CallGenerator::tf() const {
48 return TypeFunc::make(method());
49 }
50
51 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
52 return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
53 }
54
55 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
56 ciMethod* symbolic_info = caller->get_method_at_bci(bci);
57 return is_inlined_method_handle_intrinsic(symbolic_info, m);
101 GraphKit& exits = parser.exits();
102
103 if (C->failing()) {
104 while (exits.pop_exception_state() != NULL) ;
105 return NULL;
106 }
107
108 assert(exits.jvms()->same_calls_as(jvms), "sanity");
109
110 // Simply return the exit state of the parser,
111 // augmented by any exceptional states.
112 return exits.transfer_exceptions_into_jvms();
113 }
114
115 //---------------------------DirectCallGenerator------------------------------
116 // Internal class which handles all out-of-line calls w/o receiver type checks.
117 class DirectCallGenerator : public CallGenerator {
118 private:
119 CallStaticJavaNode* _call_node;
120 // Force separate memory and I/O projections for the exceptional
121 // paths to facilitate late inlining.
122 bool _separate_io_proj;
123
124 protected:
125 void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
126
127 public:
128 DirectCallGenerator(ciMethod* method, bool separate_io_proj)
129 : CallGenerator(method), _call_node(NULL),
130 _separate_io_proj(separate_io_proj)
131 {
132 }
133 virtual JVMState* generate(JVMState* jvms);
134
135 virtual CallNode* call_node() const { return _call_node; }
136 virtual CallGenerator* with_call_node(CallNode* call) {
137 DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
138 dcg->set_call_node(call->as_CallStaticJava());
139 return dcg;
140 }
141 };
142
143 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
144 GraphKit kit(jvms);
145 kit.C->print_inlining_update(this);
146 bool is_static = method()->is_static();
147 address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
148 : SharedRuntime::get_resolve_opt_virtual_call_stub();
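// A DirectCallGenerator is only used for statically bound targets (no receiver
// type check), so a non-static callee can safely go through the optimized
// virtual resolution stub; no receiver-based dispatch happens at run time.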
149
150 if (kit.C->log() != NULL) {
151 kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
152 }
153
154 CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
155 if (is_inlined_method_handle_intrinsic(jvms, method())) {
156 // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
157 // additional information about the method being invoked should be attached
158 // to the call site to make resolution logic work
159 // (see SharedRuntime::resolve_static_call_C).
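// (For example, a linkToStatic call whose MemberName argument is constant can
//  be bound directly to the target method here; the attached info lets the
//  runtime resolve the site to that target instead of the MH linker method.)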
160 call->set_override_symbolic_info(true);
161 }
162 _call_node = call; // Save the call node in case we need it later
163 if (!is_static) {
164 // Make an explicit receiver null_check as part of this call.
165 // Since we share a map with the caller, his JVMS gets adjusted.
166 kit.null_check_receiver_before_call(method());
167 if (kit.stopped()) {
168 // And dump it back to the caller, decorated with any exceptions:
169 return kit.transfer_exceptions_into_jvms();
170 }
171 // Mark the call node as virtual, sort of:
172 call->set_optimized_virtual(true);
173 if (method()->is_method_handle_intrinsic() ||
174 method()->is_compiled_lambda_form()) {
175 call->set_method_handle_invoke(true);
176 }
177 }
178 kit.set_arguments_for_java_call(call);
179 kit.set_edges_for_java_call(call, false, _separate_io_proj);
180 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
181 kit.push_node(method()->return_type()->basic_type(), ret);
182 return kit.transfer_exceptions_into_jvms();
183 }
184
185 //--------------------------VirtualCallGenerator------------------------------
186 // Internal class which handles all out-of-line calls checking receiver type.
187 class VirtualCallGenerator : public CallGenerator {
188 private:
189 int _vtable_index;
190 bool _separate_io_proj;
191 CallDynamicJavaNode* _call_node;
192
193 protected:
194 void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
195
196 public:
197 VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
198 : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL)
199 {
200 assert(vtable_index == Method::invalid_vtable_index ||
201 vtable_index >= 0, "either invalid or usable");
202 }
203 virtual bool is_virtual() const { return true; }
204 virtual JVMState* generate(JVMState* jvms);
205
206 virtual CallNode* call_node() const { return _call_node; }
207 int vtable_index() const { return _vtable_index; }
208
209 virtual CallGenerator* with_call_node(CallNode* call) {
210 VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
211 cg->set_call_node(call->as_CallDynamicJava());
212 return cg;
213 }
214 };
215
216 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
217 GraphKit kit(jvms);
218 Node* receiver = kit.argument(0);
219
220 kit.C->print_inlining_update(this);
221
222 if (kit.C->log() != NULL) {
223 kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
224 }
225
226 // If the receiver is a constant null, do not torture the system
227 // by attempting to call through it. The compile will proceed
228 // correctly, but may bail out in final_graph_reshaping, because
229 // the call instruction will have a seemingly deficient out-count.
230 // (The bailout says something misleading about an "infinite loop".)
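// (A constant null receiver can show up here after inlining and constant
//  folding prove the receiver to be null along this path.)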
231 if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
232 assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
233 ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
234 int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
235 kit.inc_sp(arg_size); // restore arguments
236 kit.uncommon_trap(Deoptimization::Reason_null_check,
237 Deoptimization::Action_none,
238 NULL, "null receiver");
239 return kit.transfer_exceptions_into_jvms();
240 }
241
242 // Ideally we would unconditionally do a null check here and let it
243 // be converted to an implicit check based on profile information.
244 // However, currently the conversion to implicit null checks in
245 // Block::implicit_null_check() only looks for loads and stores, not calls.
246 ciMethod *caller = kit.method();
247 ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
248 if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
249 ((ImplicitNullCheckThreshold > 0) && caller_md &&
250 (caller_md->trap_count(Deoptimization::Reason_null_check)
251 >= (uint)ImplicitNullCheckThreshold))) {
259 }
260
261 assert(!method()->is_static(), "virtual call must not be to static");
262 assert(!method()->is_final(), "virtual call should not be to final");
263 assert(!method()->is_private(), "virtual call should not be to private");
264 assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
265 "no vtable calls if +UseInlineCaches ");
266 address target = SharedRuntime::get_resolve_virtual_call_stub();
267 // Normal inline cache used for call
268 CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
269 if (is_inlined_method_handle_intrinsic(jvms, method())) {
270 // To be able to issue a direct call (optimized virtual or virtual)
271 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
272 // about the method being invoked should be attached to the call site to
273 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
274 call->set_override_symbolic_info(true);
275 }
276 _call_node = call; // Save the call node in case we need it later
277
278 kit.set_arguments_for_java_call(call);
279 kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
280 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
281 kit.push_node(method()->return_type()->basic_type(), ret);
282
283 // Represent the effect of an implicit receiver null_check
284 // as part of this call. Since we share a map with the caller,
285 // his JVMS gets adjusted.
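// (On the normal return path the call has already dispatched through the
//  receiver, so the receiver is known to be non-null; cast_not_null records
//  that fact for later type reasoning.)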
286 kit.cast_not_null(receiver);
287 return kit.transfer_exceptions_into_jvms();
288 }
289
290 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
291 if (InlineTree::check_can_parse(m) != NULL) return NULL;
292 return new ParseGenerator(m, expected_uses);
293 }
294
295 // As a special case, the JVMS passed to this CallGenerator is
296 // for the method execution already in progress, not just the JVMS
297 // of the caller. Thus, this CallGenerator cannot be mixed with others!
298 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
352 return DirectCallGenerator::generate(jvms);
353 }
354
355 virtual void print_inlining_late(const char* msg) {
356 CallNode* call = call_node();
357 Compile* C = Compile::current();
358 C->print_inlining_assert_ready();
359 C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
360 C->print_inlining_move_to(this);
361 C->print_inlining_update_delayed(this);
362 }
363
364 virtual void set_unique_id(jlong id) {
365 _unique_id = id;
366 }
367
368 virtual jlong unique_id() const {
369 return _unique_id;
370 }
371
372 virtual CallGenerator* with_call_node(CallNode* call) {
373 LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
374 cg->set_call_node(call->as_CallStaticJava());
375 return cg;
376 }
377 };
378
379 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
380 return new LateInlineCallGenerator(method, inline_cg);
381 }
382
383 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
384 ciMethod* _caller;
385 bool _input_not_const;
386
387 virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
388
389 public:
390 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
391 LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}
413 cg->set_call_node(call->as_CallStaticJava());
414 return cg;
415 }
416 };
417
418 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
419 // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
420 // expression stacks, which causes late inlining to break. The MH invoker is not expected to be called from a method with
421 // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack, which solves the issue
422 // of late inlining with exceptions.
423 assert(!jvms->method()->has_exception_handlers() ||
424 (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
425 method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
426 // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
427 bool allow_inline = C->inlining_incrementally();
428 bool input_not_const = true;
429 CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
430 assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
431
432 if (cg != NULL) {
433 assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
434 _inline_cg = cg;
435 C->dec_number_of_mh_late_inlines();
436 return true;
437 } else {
438 // A method handle call with a constant appendix argument should either be inlined or replaced with a direct call,
439 // unless there's a signature mismatch between caller and callee. If that fails, there's not much to improve later,
440 // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
441 return false;
442 }
443 }
444
445 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
446 assert(IncrementalInlineMH, "required");
447 Compile::current()->inc_number_of_mh_late_inlines();
448 CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
449 return cg;
450 }
451
452 // Allow inlining decisions to be delayed
631
632 #ifndef PRODUCT
633 if (PrintEliminateAllocations) {
634 tty->print("++++ Eliminated: %d ", call->_idx);
635 call->as_CallStaticJava()->method()->print_short_name(tty);
636 tty->cr();
637 }
638 #endif
639 }
640
641 void CallGenerator::do_late_inline_helper() {
642 assert(is_late_inline(), "only late inline allowed");
643
644 // Can't inline it
645 CallNode* call = call_node();
646 if (call == NULL || call->outcnt() == 0 ||
647 call->in(0) == NULL || call->in(0)->is_top()) {
648 return;
649 }
650
651 const TypeTuple *r = call->tf()->domain();
652 for (int i1 = 0; i1 < method()->arg_size(); i1++) {
653 if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
654 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
655 return;
656 }
657 }
658
659 if (call->in(TypeFunc::Memory)->is_top()) {
660 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
661 return;
662 }
663 if (call->in(TypeFunc::Memory)->is_MergeMem()) {
664 MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
665 if (merge_mem->base_memory() == merge_mem->empty_memory()) {
666 return; // dead path
667 }
668 }
669
670 // check for unreachable loop
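// (i.e. the call's own projections feed back into its control, memory, or I/O
//  inputs; such a self-referential shape is dead, so don't try to inline into it)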
671 CallProjections callprojs;
672 call->extract_projections(&callprojs, true);
673 if ((callprojs.fallthrough_catchproj == call->in(0)) ||
674 (callprojs.catchall_catchproj == call->in(0)) ||
675 (callprojs.fallthrough_memproj == call->in(TypeFunc::Memory)) ||
676 (callprojs.catchall_memproj == call->in(TypeFunc::Memory)) ||
677 (callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
678 (callprojs.catchall_ioproj == call->in(TypeFunc::I_O)) ||
679 (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
680 (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
681 return;
682 }
683
684 Compile* C = Compile::current();
685 // Remove inlined methods from Compiler's lists.
686 if (call->is_macro()) {
687 C->remove_macro_node(call);
688 }
689
690 bool result_not_used = false;
691
692 if (is_pure_call()) {
693 // Disabled due to JDK-8276112
694 if (false && is_boxing_late_inline() && callprojs.resproj != nullptr) {
695 // Replace the box node with a scalar node only if it is directly referenced by debug info.
696 assert(call->as_CallStaticJava()->is_boxing_method(), "sanity");
697 if (!has_non_debug_usages(callprojs.resproj) && is_box_cache_valid(call)) {
698 scalarize_debug_usages(call, callprojs.resproj);
699 }
700 }
701
702 // The call is marked as pure (no important side effects), but its result isn't used.
703 // It's safe to remove the call.
704 result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
705 }
706
707 if (result_not_used) {
708 GraphKit kit(call->jvms());
709 kit.replace_call(call, C->top(), true);
710 } else {
711 // Make a clone of the JVMState that is appropriate for driving a parse
712 JVMState* old_jvms = call->jvms();
713 JVMState* jvms = old_jvms->clone_shallow(C);
714 uint size = call->req();
715 SafePointNode* map = new SafePointNode(size, jvms);
716 for (uint i1 = 0; i1 < size; i1++) {
717 map->init_req(i1, call->in(i1));
718 }
719
720 // Make sure the state is a MergeMem for parsing.
721 if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
722 Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
723 C->initial_gvn()->set_type_bottom(mem);
724 map->set_req(TypeFunc::Memory, mem);
725 }
726
727 uint nargs = method()->arg_size();
728 // blow away old call arguments
729 Node* top = C->top();
730 for (uint i1 = 0; i1 < nargs; i1++) {
731 map->set_req(TypeFunc::Parms + i1, top);
732 }
733 jvms->set_map(map);
734
735 // Make enough space in the expression stack to transfer
736 // the incoming arguments and return value.
737 map->ensure_stack(jvms, jvms->method()->max_stack());
738 for (uint i1 = 0; i1 < nargs; i1++) {
739 map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
740 }
741
742 C->print_inlining_assert_ready();
743
744 C->print_inlining_move_to(this);
745
746 C->log_late_inline(this);
747
748 // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
749 if (!do_late_inline_check(C, jvms)) {
750 map->disconnect_inputs(C);
751 C->print_inlining_update_delayed(this);
752 return;
753 }
754
755 // Setup default node notes to be picked up by the inlining
756 Node_Notes* old_nn = C->node_notes_at(call->_idx);
757 if (old_nn != NULL) {
758 Node_Notes* entry_nn = old_nn->clone(C);
759 entry_nn->set_jvms(jvms);
760 C->set_default_node_notes(entry_nn);
761 }
762
763 // Now perform the inlining using the synthesized JVMState
764 JVMState* new_jvms = inline_cg()->generate(jvms);
765 if (new_jvms == NULL) return; // no change
766 if (C->failing()) return;
767
768 // Capture any exceptional control flow
769 GraphKit kit(new_jvms);
770
771 // Find the result object
772 Node* result = C->top();
773 int result_size = method()->return_type()->size();
774 if (result_size != 0 && !kit.stopped()) {
775 result = (result_size == 1) ? kit.pop() : kit.pop_pair();
776 }
777
778 if (inline_cg()->is_inline()) {
779 C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
780 C->env()->notice_inlined_method(inline_cg()->method());
781 }
782 C->set_inlining_progress(true);
783 C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
784 kit.replace_call(call, result, true);
785 }
786 }
787
788 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
789
790 public:
791 LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
792 LateInlineCallGenerator(method, inline_cg) {}
793
794 virtual JVMState* generate(JVMState* jvms) {
795 Compile *C = Compile::current();
796
797 C->log_inline_id(this);
798
799 C->add_string_late_inline(this);
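// The call is only registered here; the actual inlining is driven later from
// Compile's string late-inline list. Below we still emit the out-of-line
// direct call so the graph stays well-formed until then.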
800
801 JVMState* new_jvms = DirectCallGenerator::generate(jvms);
802 return new_jvms;
803 }
988 // Inline failed, so make a direct call.
989 assert(_if_hit->is_inline(), "must have been a failed inline");
990 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
991 new_jvms = cg->generate(kit.sync_jvms());
992 }
993 kit.add_exception_states_from(new_jvms);
994 kit.set_jvms(new_jvms);
995
996 // Need to merge slow and fast?
997 if (slow_map == NULL) {
998 // The fast path is the only path remaining.
999 return kit.transfer_exceptions_into_jvms();
1000 }
1001
1002 if (kit.stopped()) {
1003 // Inlined method threw an exception, so it's just the slow path after all.
1004 kit.set_jvms(slow_jvms);
1005 return kit.transfer_exceptions_into_jvms();
1006 }
1007
1008 // There are 2 branches and the replaced nodes are only valid on
1009 // one: restore the replaced nodes to what they were before the
1010 // branch.
1011 kit.map()->set_replaced_nodes(replaced_nodes);
1012
1013 // Finish the diamond.
1014 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1015 RegionNode* region = new RegionNode(3);
1016 region->init_req(1, kit.control());
1017 region->init_req(2, slow_map->control());
1018 kit.set_control(gvn.transform(region));
1019 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1020 iophi->set_req(2, slow_map->i_o());
1021 kit.set_i_o(gvn.transform(iophi));
1022 // Merge memory
1023 kit.merge_memory(slow_map->merged_memory(), region, 2);
1024 // Transform new memory Phis.
1025 for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1026 Node* phi = mms.memory();
1027 if (phi->is_Phi() && phi->in(0) == region) {
1028 mms.set_memory(gvn.transform(phi));
1029 }
1030 }
1031 uint tos = kit.jvms()->stkoff() + kit.sp();
1032 uint limit = slow_map->req();
1033 for (uint i = TypeFunc::Parms; i < limit; i++) {
1034 // Skip unused stack slots; fast forward to monoff();
1035 if (i == tos) {
1036 i = kit.jvms()->monoff();
1037 if (i >= limit) break;
1038 }
1039 Node* m = kit.map()->in(i);
1040 Node* n = slow_map->in(i);
1041 if (m != n) {
1042 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1043 Node* phi = PhiNode::make(region, m, t);
1044 phi->set_req(2, n);
1045 kit.map()->set_req(i, gvn.transform(phi));
1046 }
1047 }
1048 return kit.transfer_exceptions_into_jvms();
1049 }
1050
1051
1052 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1053 assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1054 bool input_not_const;
1055 CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1056 Compile* C = Compile::current();
1057 if (cg != NULL) {
1058 if (AlwaysIncrementalInline) {
1059 return CallGenerator::for_late_inline(callee, cg);
1060 } else {
1061 return cg;
1062 }
1063 }
1064 int bci = jvms->bci();
1065 ciCallProfile profile = caller->call_profile_at_bci(bci);
1066 int call_site_count = caller->scale_count(profile.count());
1067
1068 if (IncrementalInlineMH && call_site_count > 0 &&
1069 (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
1070 return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1071 } else {
1072 // Out-of-line call.
1073 return CallGenerator::for_direct_call(callee);
1074 }
1075 }
1076
1077 class NativeCallGenerator : public CallGenerator {
1078 private:
1079 address _call_addr;
1080 ciNativeEntryPoint* _nep;
1081 public:
1082 NativeCallGenerator(ciMethod* m, address call_addr, ciNativeEntryPoint* nep)
1083 : CallGenerator(m), _call_addr(call_addr), _nep(nep) {}
1084
1085 virtual JVMState* generate(JVMState* jvms);
1086 };
1087
1088 JVMState* NativeCallGenerator::generate(JVMState* jvms) {
1089 GraphKit kit(jvms);
1090
1091 Node* call = kit.make_native_call(_call_addr, tf(), method()->arg_size(), _nep); // -fallback, - nep
1092 if (call == NULL) return NULL;
1093
1094 kit.C->print_inlining_update(this);
1095 if (kit.C->log() != NULL) {
1096 kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(_call_addr));
1112 case vmIntrinsics::_invokeBasic:
1113 {
1114 // Get MethodHandle receiver:
1115 Node* receiver = kit.argument(0);
1116 if (receiver->Opcode() == Op_ConP) {
1117 input_not_const = false;
1118 const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
1119 ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
1120 const int vtable_index = Method::invalid_vtable_index;
1121
1122 if (!ciMethod::is_consistent_info(callee, target)) {
1123 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1124 "signatures mismatch");
1125 return NULL;
1126 }
1127
1128 CallGenerator* cg = C->call_generator(target, vtable_index,
1129 false /* call_does_dispatch */,
1130 jvms,
1131 allow_inline,
1132 PROB_ALWAYS);
1133 return cg;
1134 } else {
1135 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1136 "receiver not constant");
1137 }
1138 }
1139 break;
1140
1141 case vmIntrinsics::_linkToVirtual:
1142 case vmIntrinsics::_linkToStatic:
1143 case vmIntrinsics::_linkToSpecial:
1144 case vmIntrinsics::_linkToInterface:
1145 {
1146 // Get MemberName argument:
1147 Node* member_name = kit.argument(callee->arg_size() - 1);
1148 if (member_name->Opcode() == Op_ConP) {
1149 input_not_const = false;
1150 const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1151 ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1152
1153 if (!ciMethod::is_consistent_info(callee, target)) {
1154 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1155 "signatures mismatch");
1156 return NULL;
1157 }
1158
1159 // In lambda forms we erase signature types to avoid resolving issues
1160 // involving class loaders. When we optimize a method handle invoke
1161 // to a direct call we must cast the receiver and arguments to their
1162 // actual types.
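// (For example, a target taking (String)String may appear in the LambdaForm
//  as (Object)Object; the casts below restore the precise reference types.)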
1163 ciSignature* signature = target->signature();
1164 const int receiver_skip = target->is_static() ? 0 : 1;
1165 // Cast receiver to its type.
1166 if (!target->is_static()) {
1167 Node* arg = kit.argument(0);
1168 const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
1169 const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
1170 if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
1171 const Type* recv_type = arg_type->filter_speculative(sig_type); // keep speculative part
1172 Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
1173 kit.set_argument(0, cast_obj);
1174 }
1175 }
1176 // Cast reference arguments to their types.
1177 for (int i = 0, j = 0; i < signature->count(); i++) {
1178 ciType* t = signature->type_at(i);
1179 if (t->is_klass()) {
1180 Node* arg = kit.argument(receiver_skip + j);
1181 const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
1182 const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
1183 if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
1184 const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
1185 Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
1186 kit.set_argument(receiver_skip + j, cast_obj);
1187 }
1188 }
1189 j += t->size(); // long and double take two slots
1190 }
1191
1192 // Try to get the most accurate receiver type
1193 const bool is_virtual = (iid == vmIntrinsics::_linkToVirtual);
1194 const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1195 int vtable_index = Method::invalid_vtable_index;
1196 bool call_does_dispatch = false;
1197
1198 ciKlass* speculative_receiver_type = NULL;
1199 if (is_virtual_or_interface) {
1200 ciInstanceKlass* klass = target->holder();
1201 Node* receiver_node = kit.argument(0);
1202 const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1203 // call_does_dispatch and vtable_index are out-parameters. They might be changed.
1204 // optimize_virtual_call() takes 2 different holder
1205 // arguments for a corner case that doesn't apply here (see
1206 // Parse::do_call())
1207 target = C->optimize_virtual_call(caller, klass, klass,
1208 target, receiver_type, is_virtual,
1209 call_does_dispatch, vtable_index, // out-parameters
1210 false /* check_access */);
1211 // We lack profiling at this call but type speculation may
1212 // provide us with a type
1213 speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
1214 }
1215 CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1216 allow_inline,
1217 PROB_ALWAYS,
1218 speculative_receiver_type);
1219 return cg;
1220 } else {
1221 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1222 "member_name not constant");
1223 }
1224 }
1225 break;
1226
1227 case vmIntrinsics::_linkToNative:
1228 {
1229 Node* addr_n = kit.argument(1); // target address
1230 Node* nep_n = kit.argument(callee->arg_size() - 1); // NativeEntryPoint
1231 // This check needs to be kept in sync with the one in CallStaticJavaNode::Ideal
1232 if (addr_n->Opcode() == Op_ConL && nep_n->Opcode() == Op_ConP) {
1233 input_not_const = false;
1234 const TypeLong* addr_t = addr_n->bottom_type()->is_long();
1235 const TypeOopPtr* nep_t = nep_n->bottom_type()->is_oopptr();
1236 address addr = (address) addr_t->get_con();
1237 ciNativeEntryPoint* nep = nep_t->const_oop()->as_native_entry_point();
1238 return new NativeCallGenerator(callee, addr, nep);
1288 // do_intrinsic(0)
1289 // else
1290 // if (predicate(1))
1291 // do_intrinsic(1)
1292 // ...
1293 // else
1294 // do_java_comp
1295
1296 GraphKit kit(jvms);
1297 PhaseGVN& gvn = kit.gvn();
1298
1299 CompileLog* log = kit.C->log();
1300 if (log != NULL) {
1301 log->elem("predicated_intrinsic bci='%d' method='%d'",
1302 jvms->bci(), log->identify(method()));
1303 }
1304
1305 if (!method()->is_static()) {
1306 // We need an explicit receiver null_check before checking its type in the predicate.
1307 // We share a map with the caller, so his JVMS gets adjusted.
1308 Node* receiver = kit.null_check_receiver_before_call(method());
1309 if (kit.stopped()) {
1310 return kit.transfer_exceptions_into_jvms();
1311 }
1312 }
1313
1314 int n_predicates = _intrinsic->predicates_count();
1315 assert(n_predicates > 0, "sanity");
1316
1317 JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1318
1319 // Region for normal compilation code if intrinsic failed.
1320 Node* slow_region = new RegionNode(1);
1321
1322 int results = 0;
1323 for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1324 #ifdef ASSERT
1325 JVMState* old_jvms = kit.jvms();
1326 SafePointNode* old_map = kit.map();
1327 Node* old_io = old_map->i_o();
1328 Node* old_mem = old_map->memory();
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "ci/ciCallSite.hpp"
28 #include "ci/ciObjArray.hpp"
29 #include "ci/ciMemberName.hpp"
30 #include "ci/ciMethodHandle.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/callGenerator.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/inlinetypenode.hpp"
39 #include "opto/parse.hpp"
40 #include "opto/rootnode.hpp"
41 #include "opto/runtime.hpp"
42 #include "opto/subnode.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "ci/ciNativeEntryPoint.hpp"
45 #include "utilities/debug.hpp"
46
47 // Utility function.
48 const TypeFunc* CallGenerator::tf() const {
49 return TypeFunc::make(method());
50 }
51
52 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
53 return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
54 }
55
56 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
57 ciMethod* symbolic_info = caller->get_method_at_bci(bci);
58 return is_inlined_method_handle_intrinsic(symbolic_info, m);
102 GraphKit& exits = parser.exits();
103
104 if (C->failing()) {
105 while (exits.pop_exception_state() != NULL) ;
106 return NULL;
107 }
108
109 assert(exits.jvms()->same_calls_as(jvms), "sanity");
110
111 // Simply return the exit state of the parser,
112 // augmented by any exceptional states.
113 return exits.transfer_exceptions_into_jvms();
114 }
115
116 //---------------------------DirectCallGenerator------------------------------
117 // Internal class which handles all out-of-line calls w/o receiver type checks.
118 class DirectCallGenerator : public CallGenerator {
119 private:
120 CallStaticJavaNode* _call_node;
121 // Force separate memory and I/O projections for the exceptional
122 // paths to facilitate late inlining.
123 bool _separate_io_proj;
124
125 protected:
126 void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
127
128 public:
129 DirectCallGenerator(ciMethod* method, bool separate_io_proj)
130 : CallGenerator(method),
131 _call_node(NULL),
132 _separate_io_proj(separate_io_proj)
133 {
134 if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
135 // If that call has not been optimized by the time optimizations are over,
136 // we'll need to add a call to create an inline type instance from the klass
137 // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
138 // Separating memory and I/O projections for exceptions is required to
139 // perform that graph transformation.
140 _separate_io_proj = true;
141 }
142 }
143 virtual JVMState* generate(JVMState* jvms);
144
145 virtual CallNode* call_node() const { return _call_node; }
146 virtual CallGenerator* with_call_node(CallNode* call) {
147 DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
148 dcg->set_call_node(call->as_CallStaticJava());
149 return dcg;
150 }
151 };
152
153 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
154 GraphKit kit(jvms);
155 kit.C->print_inlining_update(this);
156 PhaseGVN& gvn = kit.gvn();
157 bool is_static = method()->is_static();
158 address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
159 : SharedRuntime::get_resolve_opt_virtual_call_stub();
160
161 if (kit.C->log() != NULL) {
162 kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
163 }
164
165 CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
166 if (is_inlined_method_handle_intrinsic(jvms, method())) {
167 // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
168 // additional information about the method being invoked should be attached
169 // to the call site to make resolution logic work
170 // (see SharedRuntime::resolve_static_call_C).
171 call->set_override_symbolic_info(true);
172 }
173 _call_node = call; // Save the call node in case we need it later
174 if (!is_static) {
175 // Make an explicit receiver null_check as part of this call.
176 // Since we share a map with the caller, his JVMS gets adjusted.
177 kit.null_check_receiver_before_call(method());
178 if (kit.stopped()) {
179 // And dump it back to the caller, decorated with any exceptions:
180 return kit.transfer_exceptions_into_jvms();
181 }
182 // Mark the call node as virtual, sort of:
183 call->set_optimized_virtual(true);
184 if (method()->is_method_handle_intrinsic() ||
185 method()->is_compiled_lambda_form()) {
186 call->set_method_handle_invoke(true);
187 }
188 }
189 kit.set_arguments_for_java_call(call, is_late_inline());
190 if (kit.stopped()) {
191 return kit.transfer_exceptions_into_jvms();
192 }
193 kit.set_edges_for_java_call(call, false, _separate_io_proj);
194 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
195 kit.push_node(method()->return_type()->basic_type(), ret);
196 return kit.transfer_exceptions_into_jvms();
197 }
198
199 //--------------------------VirtualCallGenerator------------------------------
200 // Internal class which handles all out-of-line calls checking receiver type.
201 class VirtualCallGenerator : public CallGenerator {
202 private:
203 int _vtable_index;
204 bool _separate_io_proj;
205 CallDynamicJavaNode* _call_node;
206
207 protected:
208 void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
209
210 public:
211 VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
212 : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL)
213 {
214 assert(vtable_index == Method::invalid_vtable_index ||
215 vtable_index >= 0, "either invalid or usable");
216 }
217 virtual bool is_virtual() const { return true; }
218 virtual JVMState* generate(JVMState* jvms);
219
220 virtual CallNode* call_node() const { return _call_node; }
221 int vtable_index() const { return _vtable_index; }
222
223 virtual CallGenerator* with_call_node(CallNode* call) {
224 VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
225 cg->set_call_node(call->as_CallDynamicJava());
226 return cg;
227 }
228 };
229
230 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
231 GraphKit kit(jvms);
232 Node* receiver = kit.argument(0);
233 kit.C->print_inlining_update(this);
234
235 if (kit.C->log() != NULL) {
236 kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
237 }
238
239 // If the receiver is a constant null, do not torture the system
240 // by attempting to call through it. The compile will proceed
241 // correctly, but may bail out in final_graph_reshaping, because
242 // the call instruction will have a seemingly deficient out-count.
243 // (The bailout says something misleading about an "infinite loop".)
244 if (!receiver->is_InlineType() && kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
245 assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
246 ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
247 int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
248 kit.inc_sp(arg_size); // restore arguments
249 kit.uncommon_trap(Deoptimization::Reason_null_check,
250 Deoptimization::Action_none,
251 NULL, "null receiver");
252 return kit.transfer_exceptions_into_jvms();
253 }
254
255 // Ideally we would unconditionally do a null check here and let it
256 // be converted to an implicit check based on profile information.
257 // However, currently the conversion to implicit null checks in
258 // Block::implicit_null_check() only looks for loads and stores, not calls.
259 ciMethod *caller = kit.method();
260 ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
261 if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
262 ((ImplicitNullCheckThreshold > 0) && caller_md &&
263 (caller_md->trap_count(Deoptimization::Reason_null_check)
264 >= (uint)ImplicitNullCheckThreshold))) {
272 }
273
274 assert(!method()->is_static(), "virtual call must not be to static");
275 assert(!method()->is_final(), "virtual call should not be to final");
276 assert(!method()->is_private(), "virtual call should not be to private");
277 assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
278 "no vtable calls if +UseInlineCaches ");
279 address target = SharedRuntime::get_resolve_virtual_call_stub();
280 // Normal inline cache used for call
281 CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
282 if (is_inlined_method_handle_intrinsic(jvms, method())) {
283 // To be able to issue a direct call (optimized virtual or virtual)
284 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
285 // about the method being invoked should be attached to the call site to
286 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
287 call->set_override_symbolic_info(true);
288 }
289 _call_node = call; // Save the call node in case we need it later
290
291 kit.set_arguments_for_java_call(call);
292 if (kit.stopped()) {
293 return kit.transfer_exceptions_into_jvms();
294 }
295 kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
296 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
297 kit.push_node(method()->return_type()->basic_type(), ret);
298
299 // Represent the effect of an implicit receiver null_check
300 // as part of this call. Since we share a map with the caller,
301 // his JVMS gets adjusted.
302 kit.cast_not_null(receiver);
303 return kit.transfer_exceptions_into_jvms();
304 }
305
306 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
307 if (InlineTree::check_can_parse(m) != NULL) return NULL;
308 return new ParseGenerator(m, expected_uses);
309 }
310
311 // As a special case, the JVMS passed to this CallGenerator is
312 // for the method execution already in progress, not just the JVMS
313 // of the caller. Thus, this CallGenerator cannot be mixed with others!
314 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
368 return DirectCallGenerator::generate(jvms);
369 }
370
371 virtual void print_inlining_late(const char* msg) {
372 CallNode* call = call_node();
373 Compile* C = Compile::current();
374 C->print_inlining_assert_ready();
375 C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
376 C->print_inlining_move_to(this);
377 C->print_inlining_update_delayed(this);
378 }
379
380 virtual void set_unique_id(jlong id) {
381 _unique_id = id;
382 }
383
384 virtual jlong unique_id() const {
385 return _unique_id;
386 }
387
388 virtual CallGenerator* inline_cg() {
389 return _inline_cg;
390 }
391
392 virtual CallGenerator* with_call_node(CallNode* call) {
393 LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
394 cg->set_call_node(call->as_CallStaticJava());
395 return cg;
396 }
397 };
398
399 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
400 return new LateInlineCallGenerator(method, inline_cg);
401 }
402
403 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
404 ciMethod* _caller;
405 bool _input_not_const;
406
407 virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
408
409 public:
410 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
411 LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}
433 cg->set_call_node(call->as_CallStaticJava());
434 return cg;
435 }
436 };
437
438 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
439 // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
440 // expression stacks, which causes late inlining to break. The MH invoker is not expected to be called from a method with
441 // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack, which solves the issue
442 // of late inlining with exceptions.
443 assert(!jvms->method()->has_exception_handlers() ||
444 (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
445 method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
446 // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
447 bool allow_inline = C->inlining_incrementally();
448 bool input_not_const = true;
449 CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
450 assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
451
452 if (cg != NULL) {
453 // AlwaysIncrementalInline causes for_method_handle_inline() to
454 // return a LateInlineCallGenerator. Extract the
455 // InlineCallGenerator from it.
456 if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
457 cg = cg->inline_cg();
458 assert(cg != NULL, "inline call generator expected");
459 }
460
461 assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
462 _inline_cg = cg;
463 C->dec_number_of_mh_late_inlines();
464 return true;
465 } else {
466 // A method handle call with a constant appendix argument should either be inlined or replaced with a direct call,
467 // unless there's a signature mismatch between caller and callee. If that fails, there's not much to improve later,
468 // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
469 return false;
470 }
471 }
472
473 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
474 assert(IncrementalInlineMH, "required");
475 Compile::current()->inc_number_of_mh_late_inlines();
476 CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
477 return cg;
478 }
479
480 // Allow inlining decisions to be delayed
659
660 #ifndef PRODUCT
661 if (PrintEliminateAllocations) {
662 tty->print("++++ Eliminated: %d ", call->_idx);
663 call->as_CallStaticJava()->method()->print_short_name(tty);
664 tty->cr();
665 }
666 #endif
667 }
668
669 void CallGenerator::do_late_inline_helper() {
670 assert(is_late_inline(), "only late inline allowed");
671
672 // Can't inline it
673 CallNode* call = call_node();
674 if (call == NULL || call->outcnt() == 0 ||
675 call->in(0) == NULL || call->in(0)->is_top()) {
676 return;
677 }
678
679 const TypeTuple* r = call->tf()->domain_cc();
680 for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
681 if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
682 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
683 return;
684 }
685 }
686
687 if (call->in(TypeFunc::Memory)->is_top()) {
688 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
689 return;
690 }
691 if (call->in(TypeFunc::Memory)->is_MergeMem()) {
692 MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
693 if (merge_mem->base_memory() == merge_mem->empty_memory()) {
694 return; // dead path
695 }
696 }
697
698 // check for unreachable loop
699 CallProjections* callprojs = call->extract_projections(true);
700 if ((callprojs->fallthrough_catchproj == call->in(0)) ||
701 (callprojs->catchall_catchproj == call->in(0)) ||
702 (callprojs->fallthrough_memproj == call->in(TypeFunc::Memory)) ||
703 (callprojs->catchall_memproj == call->in(TypeFunc::Memory)) ||
704 (callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
705 (callprojs->catchall_ioproj == call->in(TypeFunc::I_O)) ||
706 (callprojs->exobj != NULL && call->find_edge(callprojs->exobj) != -1)) {
707 return;
708 }
709
710 Compile* C = Compile::current();
711 // Remove inlined methods from Compiler's lists.
712 if (call->is_macro()) {
713 C->remove_macro_node(call);
714 }
715
716 bool result_not_used = false;
717
718 if (is_pure_call()) {
719 // Disabled due to JDK-8276112
720 if (false && is_boxing_late_inline() && callprojs->resproj[0] != nullptr) {
721 // Replace the box node with a scalar node only if it is directly referenced by debug info.
722 assert(call->as_CallStaticJava()->is_boxing_method(), "sanity");
723 if (!has_non_debug_usages(callprojs->resproj[0]) && is_box_cache_valid(call)) {
724 scalarize_debug_usages(call, callprojs->resproj[0]);
725 }
726 }
727
728 // The call is marked as pure (no important side effects), but its result isn't used.
729 // It's safe to remove the call.
730 result_not_used = true;
731 for (uint i = 0; i < callprojs->nb_resproj; i++) {
732 if (callprojs->resproj[i] != NULL) {
733 if (callprojs->resproj[i]->outcnt() != 0) {
734 result_not_used = false;
735 }
736 if (call->find_edge(callprojs->resproj[i]) != -1) {
737 return;
738 }
739 }
740 }
741 }
742
743 if (result_not_used) {
744 GraphKit kit(call->jvms());
745 kit.replace_call(call, C->top(), true);
746 } else {
747 // Make a clone of the JVMState that is appropriate for driving a parse
748 JVMState* old_jvms = call->jvms();
749 JVMState* jvms = old_jvms->clone_shallow(C);
750 uint size = call->req();
751 SafePointNode* map = new SafePointNode(size, jvms);
752 for (uint i1 = 0; i1 < size; i1++) {
753 map->init_req(i1, call->in(i1));
754 }
755
756 PhaseGVN& gvn = *C->initial_gvn();
757 // Make sure the state is a MergeMem for parsing.
758 if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
759 Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
760 gvn.set_type_bottom(mem);
761 map->set_req(TypeFunc::Memory, mem);
762 }
763
764 // blow away old call arguments
765 for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
766 map->set_req(i1, C->top());
767 }
768 jvms->set_map(map);
769
770 // Make enough space in the expression stack to transfer
771 // the incoming arguments and return value.
772 map->ensure_stack(jvms, jvms->method()->max_stack());
773 const TypeTuple* domain_sig = call->_tf->domain_sig();
774 uint nargs = method()->arg_size();
775 assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
776
777 uint j = TypeFunc::Parms;
778 int arg_num = 0;
779 for (uint i1 = 0; i1 < nargs; i1++) {
780 const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
781 if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
782 // Inline type arguments are not passed by reference: we get an argument per
783 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
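// (Illustration with a hypothetical value class: an inline type with fields
//  {int x; int y} arrives as two separate int edges on the call node;
//  make_from_multi reassembles them into a single InlineTypeNode.)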
784 GraphKit arg_kit(jvms, &gvn);
785 Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
786 map->set_control(arg_kit.control());
787 map->set_argument(jvms, i1, vt);
788 } else {
789 map->set_argument(jvms, i1, call->in(j++));
790 }
791 if (t != Type::HALF) {
792 arg_num++;
793 }
794 }
795
796 C->print_inlining_assert_ready();
797
798 C->print_inlining_move_to(this);
799
800 C->log_late_inline(this);
801
802 // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
803 if (!do_late_inline_check(C, jvms)) {
804 map->disconnect_inputs(C);
805 C->print_inlining_update_delayed(this);
806 return;
807 }
808
809 // Check if we are late inlining a method handle call that returns an inline type as fields.
810 Node* buffer_oop = NULL;
811 ciMethod* inline_method = inline_cg()->method();
812 ciType* return_type = inline_method->return_type();
813 if (!call->tf()->returns_inline_type_as_fields() && is_mh_late_inline() &&
814 return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
815 // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
816 // Do this before the method handle call in case the buffer allocation triggers deoptimization and
817 // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
818 GraphKit arg_kit(jvms, &gvn);
819 {
820 PreserveReexecuteState preexecs(&arg_kit);
821 arg_kit.jvms()->set_should_reexecute(true);
822 arg_kit.inc_sp(nargs);
823 Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
824 buffer_oop = arg_kit.new_instance(klass_node, NULL, NULL, /* deoptimize_on_exception */ true);
825 }
826 jvms = arg_kit.transfer_exceptions_into_jvms();
827 }
828
829 // Setup default node notes to be picked up by the inlining
830 Node_Notes* old_nn = C->node_notes_at(call->_idx);
831 if (old_nn != NULL) {
832 Node_Notes* entry_nn = old_nn->clone(C);
833 entry_nn->set_jvms(jvms);
834 C->set_default_node_notes(entry_nn);
835 }
836
837 // Now perform the inlining using the synthesized JVMState
838 JVMState* new_jvms = inline_cg()->generate(jvms);
839 if (new_jvms == NULL) return; // no change
840 if (C->failing()) return;
841
842 // Capture any exceptional control flow
843 GraphKit kit(new_jvms);
844
845 // Find the result object
846 Node* result = C->top();
847 int result_size = method()->return_type()->size();
848 if (result_size != 0 && !kit.stopped()) {
849 result = (result_size == 1) ? kit.pop() : kit.pop_pair();
850 }
851
852 if (inline_cg()->is_inline()) {
853 C->set_has_loops(C->has_loops() || inline_method->has_loops());
854 C->env()->notice_inlined_method(inline_method);
855 }
856 C->set_inlining_progress(true);
857 C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
858
859 // Handle inline type returns
860 InlineTypeBaseNode* vt = result->isa_InlineTypeBase();
861 if (vt != NULL) {
862 if (call->tf()->returns_inline_type_as_fields()) {
863 vt->replace_call_results(&kit, call, C, inline_method->signature()->returns_null_free_inline_type());
864 } else if (vt->is_InlineType()) {
865 // Result might still be allocated (for example, if it has been stored to a non-flattened field)
866 if (!vt->is_allocated(&kit.gvn())) {
867 assert(buffer_oop != NULL, "should have allocated a buffer");
868 RegionNode* region = new RegionNode(3);
869
870 // Check if result is null
871 Node* null_ctl = kit.top();
872 if (!inline_method->signature()->returns_null_free_inline_type()) {
873 kit.null_check_common(vt->get_is_init(), T_INT, false, &null_ctl);
874 }
875 region->init_req(1, null_ctl);
876 PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
877 Node* init_mem = kit.reset_memory();
878 PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);
879
880 // Not null, initialize the buffer
881 kit.set_all_memory(init_mem);
882 vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass());
883 // Do not let stores that initialize this buffer be reordered with a subsequent
884 // store that would make this buffer accessible by other threads.
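// (Same publication rule as for a regular object allocation: the StoreStore
//  barrier keeps the buffer's field initialization from being reordered with
//  a later store that publishes the buffer oop to other threads.)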
885 AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop, &kit.gvn());
886 assert(alloc != NULL, "must have an allocation node");
887 kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
888 region->init_req(2, kit.control());
889 oop->init_req(2, buffer_oop);
890 mem->init_req(2, kit.merged_memory());
891
892 // Update oop input to buffer
893 kit.gvn().hash_delete(vt);
894 vt->set_oop(kit.gvn().transform(oop));
895 vt = kit.gvn().transform(vt)->as_InlineTypeBase();
896
897 kit.set_control(kit.gvn().transform(region));
898 kit.set_all_memory(kit.gvn().transform(mem));
899 kit.record_for_igvn(region);
900 kit.record_for_igvn(oop);
901 kit.record_for_igvn(mem);
902 }
903 result = vt->as_ptr(&kit.gvn(), inline_method->signature()->returns_null_free_inline_type());
904 }
905 DEBUG_ONLY(buffer_oop = NULL);
906 } else {
907 assert(result->is_top() || !call->tf()->returns_inline_type_as_fields(), "Unexpected return value");
908 }
909 assert(buffer_oop == NULL, "unused buffer allocation");
910
911 kit.replace_call(call, result, true);
912 }
913 }
914
915 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
916
917 public:
918 LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
919 LateInlineCallGenerator(method, inline_cg) {}
920
921 virtual JVMState* generate(JVMState* jvms) {
922 Compile *C = Compile::current();
923
924 C->log_inline_id(this);
925
926 C->add_string_late_inline(this);
927
928 JVMState* new_jvms = DirectCallGenerator::generate(jvms);
929 return new_jvms;
930 }
1115 // Inline failed, so make a direct call.
1116 assert(_if_hit->is_inline(), "must have been a failed inline");
1117 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
1118 new_jvms = cg->generate(kit.sync_jvms());
1119 }
1120 kit.add_exception_states_from(new_jvms);
1121 kit.set_jvms(new_jvms);
1122
1123 // Need to merge slow and fast?
1124 if (slow_map == NULL) {
1125 // The fast path is the only path remaining.
1126 return kit.transfer_exceptions_into_jvms();
1127 }
1128
1129 if (kit.stopped()) {
1130 // Inlined method threw an exception, so it's just the slow path after all.
1131 kit.set_jvms(slow_jvms);
1132 return kit.transfer_exceptions_into_jvms();
1133 }
1134
1135 // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
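// (A Phi cannot merge a scalarized InlineTypeNode with a regular oop, so the
//  inline type is buffered, i.e. heap-allocated, on its own branch first.)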
1136 uint tos = kit.jvms()->stkoff() + kit.sp();
1137 uint limit = slow_map->req();
1138 for (uint i = TypeFunc::Parms; i < limit; i++) {
1139 Node* m = kit.map()->in(i);
1140 Node* n = slow_map->in(i);
1141 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1142 if (m->is_InlineType() && !t->isa_inlinetype()) {
1143 // Allocate inline type in fast path
1144 m = m->as_InlineType()->buffer(&kit);
1145 kit.map()->set_req(i, m);
1146 }
1147 if (n->is_InlineType() && !t->isa_inlinetype()) {
1148 // Allocate inline type in slow path
1149 PreserveJVMState pjvms(&kit);
1150 kit.set_map(slow_map);
1151 n = n->as_InlineType()->buffer(&kit);
1152 kit.map()->set_req(i, n);
1153 slow_map = kit.stop();
1154 }
1155 }
1156
1157 // There are 2 branches and the replaced nodes are only valid on
1158 // one: restore the replaced nodes to what they were before the
1159 // branch.
1160 kit.map()->set_replaced_nodes(replaced_nodes);
1161
1162 // Finish the diamond.
1163 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1164 RegionNode* region = new RegionNode(3);
1165 region->init_req(1, kit.control());
1166 region->init_req(2, slow_map->control());
1167 kit.set_control(gvn.transform(region));
1168 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1169 iophi->set_req(2, slow_map->i_o());
1170 kit.set_i_o(gvn.transform(iophi));
1171 // Merge memory
1172 kit.merge_memory(slow_map->merged_memory(), region, 2);
1173 // Transform new memory Phis.
1174 for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1175 Node* phi = mms.memory();
1176 if (phi->is_Phi() && phi->in(0) == region) {
1177 mms.set_memory(gvn.transform(phi));
1178 }
1179 }
1180 for (uint i = TypeFunc::Parms; i < limit; i++) {
1181 // Skip unused stack slots; fast forward to monoff();
1182 if (i == tos) {
1183 i = kit.jvms()->monoff();
1184 if (i >= limit) break;
1185 }
1186 Node* m = kit.map()->in(i);
1187 Node* n = slow_map->in(i);
1188 if (m != n) {
1189 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1190 Node* phi = PhiNode::make(region, m, t);
1191 phi->set_req(2, n);
1192 kit.map()->set_req(i, gvn.transform(phi));
1193 }
1194 }
1195 return kit.transfer_exceptions_into_jvms();
1196 }
1197
1198
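// Builds a call generator for a call site whose callee is a method handle
// intrinsic (invokeBasic or one of the linkTo* linkers). First try to inline
// it right away; if that succeeds, the generator is either returned as-is or
// wrapped for late inlining under AlwaysIncrementalInline. Otherwise choose
// between MH late inlining and a plain out-of-line call, depending on the
// inlining flags, the profiled call site count and whether the crucial
// MethodHandle/MemberName argument was constant.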
1199 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1200 assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1201 bool input_not_const;
1202 CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1203 Compile* C = Compile::current();
1204 if (cg != NULL) {
1205 if (AlwaysIncrementalInline) {
1206 return CallGenerator::for_late_inline(callee, cg);
1207 } else {
1208 return cg;
1209 }
1210 }
1211 int bci = jvms->bci();
1212 ciCallProfile profile = caller->call_profile_at_bci(bci);
1213 int call_site_count = caller->scale_count(profile.count());
1214
1215 if (IncrementalInlineMH && (AlwaysIncrementalInline ||
1216 (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
1217 return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1218 } else {
1219 // Out-of-line call.
1220 return CallGenerator::for_direct_call(callee);
1221 }
1222 }
1223
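// Arguments flowing through a lambda form have their types erased to Object.
// When such a call is turned into a direct call, cast_argument() narrows an
// argument back to the type required by the target's signature (inserting a
// CheckCastPP) and, for inline type arguments, rewraps the oop as an
// InlineTypeNode so it can later be passed on in scalarized form.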
1224 static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit, bool null_free) {
1225 PhaseGVN& gvn = kit.gvn();
1226 Node* arg = kit.argument(arg_nb);
1227 const Type* arg_type = arg->bottom_type();
1228 const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
1229 if (t->as_klass()->is_inlinetype() && null_free) {
1230 sig_type = sig_type->filter_speculative(TypePtr::NOTNULL);
1231 }
1232 if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
1233 const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
1234 arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
1235 kit.set_argument(arg_nb, arg);
1236 }
1237 if (sig_type->is_inlinetypeptr() && !arg->is_InlineType()) {
1238 arg = InlineTypeNode::make_from_oop(&kit, arg, t->as_inline_klass(), !kit.gvn().type(arg)->maybe_null());
1239 kit.set_argument(arg_nb, arg);
1240 }
1241 }
1242
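// Call generator for foreign-function (linkToNative) downcalls whose target
// address and NativeEntryPoint are compile-time constants: it emits a native
// call straight to the resolved entry point instead of going through the
// linkToNative linker.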
1243 class NativeCallGenerator : public CallGenerator {
1244 private:
1245 address _call_addr;
1246 ciNativeEntryPoint* _nep;
1247 public:
1248 NativeCallGenerator(ciMethod* m, address call_addr, ciNativeEntryPoint* nep)
1249 : CallGenerator(m), _call_addr(call_addr), _nep(nep) {}
1250
1251 virtual JVMState* generate(JVMState* jvms);
1252 };
1253
1254 JVMState* NativeCallGenerator::generate(JVMState* jvms) {
1255 GraphKit kit(jvms);
1256
1257 Node* call = kit.make_native_call(_call_addr, tf(), method()->arg_size(), _nep); // -fallback, - nep
1258 if (call == NULL) return NULL;
1259
1260 kit.C->print_inlining_update(this);
1261 if (kit.C->log() != NULL) {
1262 kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(_call_addr));
1278 case vmIntrinsics::_invokeBasic:
1279 {
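// MethodHandle.invokeBasic: if the receiver method handle is a compile-time
// constant, the method it ultimately targets (its vmtarget) can be resolved
// now and called directly instead of dispatching through the handle.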
1280 // Get MethodHandle receiver:
1281 Node* receiver = kit.argument(0);
1282 if (receiver->Opcode() == Op_ConP) {
1283 input_not_const = false;
1284 const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
1285 ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
1286 const int vtable_index = Method::invalid_vtable_index;
1287
1288 if (!ciMethod::is_consistent_info(callee, target)) {
1289 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1290 "signatures mismatch");
1291 return NULL;
1292 }
1293
1294 CallGenerator* cg = C->call_generator(target, vtable_index,
1295 false /* call_does_dispatch */,
1296 jvms,
1297 allow_inline,
1298 PROB_ALWAYS,
1299 NULL,
1300 true);
1301 return cg;
1302 } else {
1303 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1304 "receiver not constant");
1305 }
1306 }
1307 break;
1308
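// The linkTo* linkers take the actual arguments followed by a trailing
// MemberName that names the real target. If that MemberName is a compile-time
// constant, the call can be turned into a direct (or devirtualized virtual)
// call to the target, after casting the erased arguments back to the types in
// the target's signature. An illustrative sketch of a lambda form body (the
// names are made up):
//   static Object invoke_L_L(Object mh, Object arg) {
//     Object mn = ...;                        // constant MemberName
//     return MethodHandle.linkToVirtual(arg, mn);
//   }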
1309 case vmIntrinsics::_linkToVirtual:
1310 case vmIntrinsics::_linkToStatic:
1311 case vmIntrinsics::_linkToSpecial:
1312 case vmIntrinsics::_linkToInterface:
1313 {
1314 int nargs = callee->arg_size();
1315 // Get MemberName argument:
1316 Node* member_name = kit.argument(nargs - 1);
1317 if (member_name->Opcode() == Op_ConP) {
1318 input_not_const = false;
1319 const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1320 ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1321
1322 if (!ciMethod::is_consistent_info(callee, target)) {
1323 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1324 "signatures mismatch");
1325 return NULL;
1326 }
1327
1328 // In lambda forms we erase signature types to avoid resolution issues
1329 // involving class loaders. When we optimize a method handle invoke
1330 // to a direct call, we must cast the receiver and arguments to their
1331 // actual types.
1332 ciSignature* signature = target->signature();
1333 const int receiver_skip = target->is_static() ? 0 : 1;
1334 // Cast receiver to its type.
1335 if (!target->is_static()) {
1336 cast_argument(nargs, 0, signature->accessing_klass(), kit, false);
1337 }
1338 // Cast reference arguments to their types.
1339 for (int i = 0, j = 0; i < signature->count(); i++) {
1340 ciType* t = signature->type_at(i);
1341 if (t->is_klass()) {
1342 bool null_free = signature->is_null_free_at(i);
1343 cast_argument(nargs, receiver_skip + j, t, kit, null_free);
1344 }
1345 j += t->size(); // long and double take two slots
1346 }
1347
1348 // Try to get the most accurate receiver type
1349 const bool is_virtual = (iid == vmIntrinsics::_linkToVirtual);
1350 const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1351 int vtable_index = Method::invalid_vtable_index;
1352 bool call_does_dispatch = false;
1353
1354 ciKlass* speculative_receiver_type = NULL;
1355 if (is_virtual_or_interface) {
1356 ciInstanceKlass* klass = target->holder();
1357 Node* receiver_node = kit.argument(0);
1358 const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1359 // call_does_dispatch and vtable_index are out-parameters; they may be updated below.
1360 // optimize_virtual_call() takes two different holder
1361 // arguments for a corner case that doesn't apply here (see
1362 // Parse::do_call()).
1363 target = C->optimize_virtual_call(caller, klass, klass,
1364 target, receiver_type, is_virtual,
1365 call_does_dispatch, vtable_index, // out-parameters
1366 false /* check_access */);
1367 // We lack profiling at this call, but type speculation may
1368 // provide us with a type.
1369 speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
1370 }
1371 CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1372 allow_inline,
1373 PROB_ALWAYS,
1374 speculative_receiver_type,
1375 true);
1376 return cg;
1377 } else {
1378 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1379 "member_name not constant");
1380 }
1381 }
1382 break;
1383
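// linkToNative: both the target address (a long constant) and the
// NativeEntryPoint object must be compile-time constants; if they are, the
// downcall is generated directly via NativeCallGenerator above.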
1384 case vmIntrinsics::_linkToNative:
1385 {
1386 Node* addr_n = kit.argument(1); // target address
1387 Node* nep_n = kit.argument(callee->arg_size() - 1); // NativeEntryPoint
1388 // This check needs to be kept in sync with the one in CallStaticJavaNode::Ideal
1389 if (addr_n->Opcode() == Op_ConL && nep_n->Opcode() == Op_ConP) {
1390 input_not_const = false;
1391 const TypeLong* addr_t = addr_n->bottom_type()->is_long();
1392 const TypeOopPtr* nep_t = nep_n->bottom_type()->is_oopptr();
1393 address addr = (address) addr_t->get_con();
1394 ciNativeEntryPoint* nep = nep_t->const_oop()->as_native_entry_point();
1395 return new NativeCallGenerator(callee, addr, nep);
1445 //     do_intrinsic(0)
1446 //   else
1447 //     if (predicate(1))
1448 //       do_intrinsic(1)
1449 //   ...
1450 //     else
1451 //       do_java_comp
1452
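// Emit the chain sketched above: a receiver null check for non-static
// methods, then one predicated intrinsic version per predicate, collecting
// the resulting JVM states; paths on which every predicate fails are gathered
// in slow_region, where the regular (non-intrinsic) code takes over.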
1453 GraphKit kit(jvms);
1454 PhaseGVN& gvn = kit.gvn();
1455
1456 CompileLog* log = kit.C->log();
1457 if (log != NULL) {
1458 log->elem("predicated_intrinsic bci='%d' method='%d'",
1459 jvms->bci(), log->identify(method()));
1460 }
1461
1462 if (!method()->is_static()) {
1463 // We need an explicit receiver null check before checking its type in the predicate.
1464 // We share a map with the caller, so its JVMS gets adjusted.
1465 kit.null_check_receiver_before_call(method());
1466 if (kit.stopped()) {
1467 return kit.transfer_exceptions_into_jvms();
1468 }
1469 }
1470
1471 int n_predicates = _intrinsic->predicates_count();
1472 assert(n_predicates > 0, "sanity");
1473
1474 JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1475
1476 // Region for normal compilation code if the intrinsic fails.
1477 Node* slow_region = new RegionNode(1);
1478
1479 int results = 0;
1480 for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1481 #ifdef ASSERT
1482 JVMState* old_jvms = kit.jvms();
1483 SafePointNode* old_map = kit.map();
1484 Node* old_io = old_map->i_o();
1485 Node* old_mem = old_map->memory();