13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "interpreter/linkResolver.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/method.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/c2compiler.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/idealGraphPrinter.hpp"
34 #include "opto/locknode.hpp"
35 #include "opto/memnode.hpp"
36 #include "opto/opaquenode.hpp"
37 #include "opto/parse.hpp"
38 #include "opto/rootnode.hpp"
39 #include "opto/runtime.hpp"
40 #include "opto/type.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "utilities/bitMap.inline.hpp"
45 #include "utilities/copy.hpp"
46
47 // Static array so we can figure out which bytecodes stop us from compiling
48 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
49 // and eventually should be encapsulated in a proper class (gri 8/18/98).
50
51 #ifndef PRODUCT
52 uint nodes_created = 0;
53 uint methods_parsed = 0;
85 }
86 if (all_null_checks_found) {
87 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
88 (100*implicit_null_checks)/all_null_checks_found);
89 }
90 if (SharedRuntime::_implicit_null_throws) {
91 tty->print_cr("%u implicit null exceptions at runtime",
92 SharedRuntime::_implicit_null_throws);
93 }
94
95 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
96 BytecodeParseHistogram::print();
97 }
98 }
99 #endif
100
101 //------------------------------ON STACK REPLACEMENT---------------------------
102
103 // Construct a node which can be used to get incoming state for
104 // on stack replacement.
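// The interpreter has copied the frame's locals and monitors into a temporary
// OSR buffer (built by SharedRuntime::OSR_migration_begin and released by the
// OSR_migration_end call further down): roughly max_locals words of locals
// followed by two words per monitor (displaced header and locked object).
// The helper below reads one such word back as a raw-memory load.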
105 Node *Parse::fetch_interpreter_state(int index,
106 BasicType bt,
107 Node *local_addrs,
108 Node *local_addrs_base) {
109 Node *mem = memory(Compile::AliasIdxRaw);
110 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
111 Node *ctl = control();
112
113 // Very similar to LoadNode::make, except we handle un-aligned longs and
114 // doubles on Sparc. Intel can handle them just fine directly.
115 Node *l = nullptr;
116 switch (bt) { // Signature is flattened
117 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
118 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
119 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
120 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
121 case T_LONG:
122 case T_DOUBLE: {
123 // Since arguments are in reverse order, the argument address 'adr'
124 // refers to the back half of the long/double. Recompute adr.
125 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
126 if (Matcher::misaligned_doubles_ok) {
127 l = (bt == T_DOUBLE)
128 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
129 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
130 } else {
131 l = (bt == T_DOUBLE)
132 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
133 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
134 }
135 break;
136 }
137 default: ShouldNotReachHere();
138 }
139 return _gvn.transform(l);
140 }
141
142 // Helper routine to prevent the interpreter from handing
143 // unexpected typestate to an OSR method.
144 // The Node l is a value newly dug out of the interpreter frame.
145 // The type is the type predicted by ciTypeFlow. Note that it is
146 // not a general type, but can only come from Type::get_typeflow_type.
147 // The safepoint is a map which will feed an uncommon trap.
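// The emitted shape is roughly:
//   if (value does not match the asserted type)  goto bad_type_exit;  // feeds an uncommon trap
//   // else fall through with 'value' narrowed to 'type'
// where the test is either a null check (CmpP against null()) or a full
// checkcast (gen_checkcast), depending on what ciTypeFlow asserted.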
148 Node* Parse::check_interpreter_type(Node* l, const Type* type,
149 SafePointNode* &bad_type_exit) {
150
151 const TypeOopPtr* tp = type->isa_oopptr();
152
153 // TypeFlow may assert null-ness if a type appears unloaded.
154 if (type == TypePtr::NULL_PTR ||
155 (tp != nullptr && !tp->is_loaded())) {
156 // Value must be null, not a real oop.
157 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
158 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
159 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
160 set_control(_gvn.transform( new IfTrueNode(iff) ));
161 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
162 bad_type_exit->control()->add_req(bad_type);
163 l = null();
164 }
165
166 // Typeflow can also cut off paths from the CFG, based on
167 // types which appear unloaded, or call sites which appear unlinked.
168 // When paths are cut off, values at later merge points can rise
169 // toward more specific classes. Make sure these specific classes
170 // are still in effect.
171 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
172 // TypeFlow asserted a specific object type. Value must have that type.
173 Node* bad_type_ctrl = nullptr;
174 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
175 bad_type_exit->control()->add_req(bad_type_ctrl);
176 }
177
178 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
179 return l;
180 }
181
182 // Helper routine which sets up elements of the initial parser map when
183 // performing a parse for on stack replacement. Add values into map.
184 // The only parameter contains the address of the interpreter arguments (the OSR buffer).
185 void Parse::load_interpreter_state(Node* osr_buf) {
186 int index;
187 int max_locals = jvms()->loc_size();
188 int max_stack = jvms()->stk_size();
189
190
191 // Mismatch between method and jvms can occur since map briefly held
192 // an OSR entry state (which takes up one RawPtr word).
193 assert(max_locals == method()->max_locals(), "sanity");
194 assert(max_stack >= method()->max_stack(), "sanity");
195 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
196 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
197
198 // Find the start block.
199 Block* osr_block = start_block();
200 assert(osr_block->start() == osr_bci(), "sanity");
201
202 // Set initial BCI.
203 set_parse_bci(osr_block->start());
204
205 // Set initial stack depth.
206 set_sp(osr_block->start_sp());
207
208 // Check bailouts. We currently do not perform on stack replacement
209 // of loops in catch blocks or loops which branch with a non-empty stack.
210 if (sp() != 0) {
211 C->record_method_not_compilable("OSR starts with non-empty stack");
212 return;
213 }
214 // Do not OSR inside finally clauses:
215 if (osr_block->has_trap_at(osr_block->start())) {
216 assert(false, "OSR starts with an immediate trap");
217 C->record_method_not_compilable("OSR starts with an immediate trap");
218 return;
219 }
220
221 // Commute monitors from interpreter frame to compiler frame.
222 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
223 int mcnt = osr_block->flow()->monitor_count();
224 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
225 for (index = 0; index < mcnt; index++) {
226 // Make a BoxLockNode for the monitor.
227 Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
228
229
230 // Displaced headers and locked objects are interleaved in the
231 // temp OSR buffer. We only copy the locked objects out here.
232 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
233 Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
234 // Try to copy the displaced header to the BoxNode
235 Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
236
237
238 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
239
240 // Build a bogus FastLockNode (no code will be generated) and push the
241 // monitor into our debug info.
242 const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
243 map()->push_monitor(flock);
244
245 // If the lock is our method synchronization lock, tuck it away in
246 // _sync_lock for return and rethrow exit paths.
247 if (index == 0 && method()->is_synchronized()) {
248 _synch_lock = flock;
249 }
250 }
251
252 // Use the raw liveness computation to make sure that unexpected
253 // values don't propagate into the OSR frame.
254 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
255 if (!live_locals.is_valid()) {
256 // Degenerate or breakpointed method.
284 if (C->log() != nullptr) {
285 C->log()->elem("OSR_mismatch local_index='%d'",index);
286 }
287 set_local(index, null());
288 // and ignore it for the loads
289 continue;
290 }
291 }
292
293 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
294 if (type == Type::TOP || type == Type::HALF) {
295 continue;
296 }
297 // If the type falls to bottom, then this must be a local that
298 // is mixing ints and oops or some such. Forcing it to top
299 // makes it go dead.
300 if (type == Type::BOTTOM) {
301 continue;
302 }
303 // Construct code to access the appropriate local.
304 BasicType bt = type->basic_type();
305 if (type == TypePtr::NULL_PTR) {
306 // Ptr types are mixed together with T_ADDRESS but null is
307 // really for T_OBJECT types so correct it.
308 bt = T_OBJECT;
309 }
310 Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
311 set_local(index, value);
312 }
313
314 // Extract the needed stack entries from the interpreter frame.
315 for (index = 0; index < sp(); index++) {
316 const Type *type = osr_block->stack_type_at(index);
317 if (type != Type::TOP) {
318 // Currently the compiler bails out when attempting to on stack replace
319 // at a bci with a non-empty stack. We should not reach here.
320 ShouldNotReachHere();
321 }
322 }
323
324 // End the OSR migration
325 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
326 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
327 "OSR_migration_end", TypeRawPtr::BOTTOM,
328 osr_buf);
329
330 // Now that the interpreter state is loaded, make sure it will match
493 log->elem("observe that='has_exception_handlers'");
494 }
495
496 assert(InlineTree::check_can_parse(method()) == nullptr, "Can not parse this method, cutout earlier");
497 assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
498
499 // Always register dependence if JVMTI is enabled, because
500 // either breakpoint setting or hotswapping of methods may
501 // cause deoptimization.
502 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
503 C->dependencies()->assert_evol_method(method());
504 }
505
506 NOT_PRODUCT(methods_seen++);
507
508 // Do some special top-level things.
509 if (depth() == 1 && C->is_osr_compilation()) {
510 _entry_bci = C->entry_bci();
511 _flow = method()->get_osr_flow_analysis(osr_bci());
512 if (_flow->failing()) {
513 assert(false, "type flow analysis failed for OSR compilation");
514 C->record_method_not_compilable(_flow->failure_reason());
515 #ifndef PRODUCT
516 if (PrintOpto && (Verbose || WizardMode)) {
517 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
518 if (Verbose) {
519 method()->print();
520 method()->print_codes();
521 _flow->print();
522 }
523 }
524 #endif
525 }
526 _tf = C->tf(); // the OSR entry type is different
527 }
528
529 #ifdef ASSERT
530 if (depth() == 1) {
531 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
532 } else {
533 assert(!this->is_osr_parse(), "no recursive OSR");
581 do_method_entry();
582 }
583
584 if (depth() == 1 && !failing()) {
585 if (C->clinit_barrier_on_entry()) {
586 // Add check to deoptimize the nmethod once the holder class is fully initialized
587 clinit_deopt();
588 }
589
590 // Add check to deoptimize the nmethod if RTM state was changed
591 rtm_deopt();
592 }
593
594 // Check for bailouts during method entry or RTM state check setup.
595 if (failing()) {
596 if (log) log->done("parse");
597 C->set_default_node_notes(caller_nn);
598 return;
599 }
600
601 entry_map = map(); // capture any changes performed by method setup code
602 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
603
604 // We begin parsing as if we have just encountered a jump to the
605 // method entry.
606 Block* entry_block = start_block();
607 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
608 set_map_clone(entry_map);
609 merge_common(entry_block, entry_block->next_path_num());
610
611 #ifndef PRODUCT
612 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
613 set_parse_histogram( parse_histogram_obj );
614 #endif
615
616 // Parse all the basic blocks.
617 do_all_blocks();
618
619 // Check for bailouts during conversion to graph
620 if (failing()) {
766 void Parse::build_exits() {
767 // make a clone of caller to prevent sharing of side-effects
768 _exits.set_map(_exits.clone_map());
769 _exits.clean_stack(_exits.sp());
770 _exits.sync_jvms();
771
772 RegionNode* region = new RegionNode(1);
773 record_for_igvn(region);
774 gvn().set_type_bottom(region);
775 _exits.set_control(region);
776
777 // Note: iophi and memphi are not transformed until do_exits.
778 Node* iophi = new PhiNode(region, Type::ABIO);
779 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
780 gvn().set_type_bottom(iophi);
781 gvn().set_type_bottom(memphi);
782 _exits.set_i_o(iophi);
783 _exits.set_all_memory(memphi);
784
785 // Add a return value to the exit state. (Do not push it yet.)
786 if (tf()->range()->cnt() > TypeFunc::Parms) {
787 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
788 if (ret_type->isa_int()) {
789 BasicType ret_bt = method()->return_type()->basic_type();
790 if (ret_bt == T_BOOLEAN ||
791 ret_bt == T_CHAR ||
792 ret_bt == T_BYTE ||
793 ret_bt == T_SHORT) {
794 ret_type = TypeInt::INT;
795 }
796 }
797
798 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
799 // becomes loaded during the subsequent parsing, the loaded and unloaded
800 // types will not join when we transform and push in do_exits().
801 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
802 if (ret_oop_type && !ret_oop_type->is_loaded()) {
803 ret_type = TypeOopPtr::BOTTOM;
804 }
805 int ret_size = type2size[ret_type->basic_type()];
806 Node* ret_phi = new PhiNode(region, ret_type);
807 gvn().set_type_bottom(ret_phi);
808 _exits.ensure_stack(ret_size);
809 assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
810 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
811 _exits.set_argument(0, ret_phi); // here is where the parser finds it
812 // Note: ret_phi is not yet pushed, until do_exits.
813 }
814 }
815
816
817 //----------------------------build_start_state-------------------------------
818 // Construct a state which contains only the incoming arguments from an
819 // unknown caller. The method & bci will be null & InvocationEntryBci.
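// Each incoming argument is materialized as a ParmNode projecting slot i of
// the StartNode's tuple; those ParmNodes become the initial contents of the
// entry map, a SafePointNode whose size also leaves room for the return
// value slot(s).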
820 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
821 int arg_size = tf->domain()->cnt();
822 int max_size = MAX2(arg_size, (int)tf->range()->cnt());
823 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
824 SafePointNode* map = new SafePointNode(max_size, jvms);
825 record_for_igvn(map);
826 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
827 Node_Notes* old_nn = default_node_notes();
828 if (old_nn != nullptr && has_method()) {
829 Node_Notes* entry_nn = old_nn->clone(this);
830 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
831 entry_jvms->set_offsets(0);
832 entry_jvms->set_bci(entry_bci());
833 entry_nn->set_jvms(entry_jvms);
834 set_default_node_notes(entry_nn);
835 }
836 uint i;
837 for (i = 0; i < (uint)arg_size; i++) {
838 Node* parm = initial_gvn()->transform(new ParmNode(start, i));
839 map->init_req(i, parm);
840 // Record all these guys for later GVN.
841 record_for_igvn(parm);
842 }
843 for (; i < map->req(); i++) {
844 map->init_req(i, top());
845 }
846 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
847 set_default_node_notes(old_nn);
848 jvms->set_map(map);
849 return jvms;
850 }
851
852 //-----------------------------make_node_notes---------------------------------
853 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
854 if (caller_nn == nullptr) return nullptr;
855 Node_Notes* nn = caller_nn->clone(C);
856 JVMState* caller_jvms = nn->jvms();
857 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
858 jvms->set_offsets(0);
859 jvms->set_bci(_entry_bci);
860 nn->set_jvms(jvms);
861 return nn;
862 }
863
864
865 //--------------------------return_values--------------------------------------
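// Collect the parser's single exit state into a ReturnNode and hang it off
// the Root node, giving the graph a well-defined exit.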
866 void Compile::return_values(JVMState* jvms) {
867 GraphKit kit(jvms);
868 Node* ret = new ReturnNode(TypeFunc::Parms,
869 kit.control(),
870 kit.i_o(),
871 kit.reset_memory(),
872 kit.frameptr(),
873 kit.returnadr());
874 // Add zero or 1 return values
875 int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
876 if (ret_size > 0) {
877 kit.inc_sp(-ret_size); // pop the return value(s)
878 kit.sync_jvms();
879 ret->add_req(kit.argument(0));
880 // Note: The second dummy edge is not needed by a ReturnNode.
881 }
882 // bind it to root
883 root()->add_req(ret);
884 record_for_igvn(ret);
885 initial_gvn()->transform_no_reclaim(ret);
886 }
887
888 //------------------------rethrow_exceptions-----------------------------------
889 // Bind all exception states in the list into a single RethrowNode.
890 void Compile::rethrow_exceptions(JVMState* jvms) {
891 GraphKit kit(jvms);
892 if (!kit.has_exceptions()) return; // nothing to generate
893 // Load my combined exception state into the kit, with all phis transformed:
894 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
895 Node* ex_oop = kit.use_exception_state(ex_map);
896 RethrowNode* exit = new RethrowNode(kit.control(),
897 kit.i_o(), kit.reset_memory(),
898 kit.frameptr(), kit.returnadr(),
899 // like a return but with exception input
900 ex_oop);
984 // to complete, we force all writes to complete.
985 //
986 // 2. Experimental VM option is used to force the barrier if any field
987 // was written out in the constructor.
988 //
989 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
990 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
991 // MemBarVolatile is used before volatile load instead of after volatile
992 // store, so there's no barrier after the store.
993 // We want to guarantee the same behavior as on platforms with total store
994 // order, although this is not required by the Java memory model.
995 // In this case, we want to enforce visibility of volatile field
996 // initializations which are performed in constructors.
997 // So as with finals, we add a barrier here.
998 //
999 // "All bets are off" unless the first publication occurs after a
1000 // normal return from the constructor. We do not attempt to detect
1001 // such unusual early publications. But no barrier is needed on
1002 // exceptional returns, since they cannot publish normally.
1003 //
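// Illustrative example (not from this file): for
//   class A { final int x;  A() { x = 42; } }
//   // thread 1:  shared = new A();
//   // thread 2:  A a = shared;  if (a != null) use(a.x);   // must observe 42
// the Java memory model guarantees thread 2 sees the final field's value as
// long as the reference is published only after the constructor returns
// normally; the MemBarRelease emitted below is what backs that guarantee on
// weakly ordered hardware.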
1004 if (method()->is_initializer() &&
1005 (wrote_final() ||
1006 (AlwaysSafeConstructors && wrote_fields()) ||
1007 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1008 _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1009
1010 // If Memory barrier is created for final fields write
1011 // and allocation node does not escape the initialize method,
1012 // then barrier introduced by allocation node can be removed.
1013 if (DoEscapeAnalysis && alloc_with_final()) {
1014 AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final());
1015 alloc->compute_MemBar_redundancy(method());
1016 }
1017 if (PrintOpto && (Verbose || WizardMode)) {
1018 method()->print_name();
1019 tty->print_cr(" writes finals and needs a memory barrier");
1020 }
1021 }
1022
1023 // Any method can write a @Stable field; insert memory barriers
1024 // after those also. Can't bind predecessor allocation node (if any)
1025 // with barrier because allocation doesn't always dominate
1026 // MemBarRelease.
1027 if (wrote_stable()) {
1028 _exits.insert_mem_bar(Op_MemBarRelease);
1029 if (PrintOpto && (Verbose || WizardMode)) {
1030 method()->print_name();
1031 tty->print_cr(" writes @Stable and needs a memory barrier");
1032 }
1033 }
1034
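// C2 keeps one memory slice per alias class inside a MergeMem; the stream
// below visits each non-empty slice of the exit state's memory phi so that
// every slice is transformed individually.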
1035 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1036 // transform each slice of the original memphi:
1037 mms.set_memory(_gvn.transform(mms.memory()));
1038 }
1039 // Clean up input MergeMems created by transforming the slices
1040 _gvn.transform(_exits.merged_memory());
1041
1042 if (tf()->range()->cnt() > TypeFunc::Parms) {
1043 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1044 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1045 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1046 // If the type we set for the ret_phi in build_exits() is too optimistic and
1047 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1048 // loading. It could also be due to an error, so mark this method as not compilable because
1049 // otherwise this could lead to an infinite compile loop.
1050 // In any case, this code path is rarely (and never in my testing) reached.
1051 #ifdef ASSERT
1052 tty->print_cr("# Can't determine return type.");
1053 tty->print_cr("# exit control");
1054 _exits.control()->dump(2);
1055 tty->print_cr("# ret phi type");
1056 _gvn.type(ret_phi)->dump();
1057 tty->print_cr("# ret phi");
1058 ret_phi->dump(2);
1059 #endif // ASSERT
1060 assert(false, "Can't determine return type.");
1061 C->record_method_not_compilable("Can't determine return type.");
1062 return;
1063 }
1127
1128 //-----------------------------create_entry_map-------------------------------
1129 // Initialize our parser map to contain the types at method entry.
1130 // For OSR, the map contains a single RawPtr parameter.
1131 // Initial monitor locking for sync. methods is performed by do_method_entry.
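// Map layout (cf. the asserts in load_interpreter_state): the first
// TypeFunc::Parms slots hold control/i_o/memory/frameptr/returnadr, followed
// by max_locals() local slots and max_stack() expression-stack slots, so
// len == jvms->endoff() below.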
1132 SafePointNode* Parse::create_entry_map() {
1133 // Check for really stupid bail-out cases.
1134 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1135 if (len >= 32760) {
1136 // Bailout expected, this is a very rare edge case.
1137 C->record_method_not_compilable("too many local variables");
1138 return nullptr;
1139 }
1140
1141 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1142 _caller->map()->delete_replaced_nodes();
1143
1144 // If this is an inlined method, we may have to do a receiver null check.
1145 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1146 GraphKit kit(_caller);
1147 kit.null_check_receiver_before_call(method());
1148 _caller = kit.transfer_exceptions_into_jvms();
1149 if (kit.stopped()) {
1150 _exits.add_exception_states_from(_caller);
1151 _exits.set_jvms(_caller);
1152 return nullptr;
1153 }
1154 }
1155
1156 assert(method() != nullptr, "parser must have a method");
1157
1158 // Create an initial safepoint to hold JVM state during parsing
1159 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1160 set_map(new SafePointNode(len, jvms));
1161 jvms->set_map(map());
1162 record_for_igvn(map());
1163 assert(jvms->endoff() == len, "correct jvms sizing");
1164
1165 SafePointNode* inmap = _caller->map();
1166 assert(inmap != nullptr, "must have inmap");
1167 // In case of null check on receiver above
1168 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1169
1170 uint i;
1171
1172 // Pass thru the predefined input parameters.
1173 for (i = 0; i < TypeFunc::Parms; i++) {
1174 map()->init_req(i, inmap->in(i));
1175 }
1176
1177 if (depth() == 1) {
1178 assert(map()->memory()->Opcode() == Op_Parm, "");
1179 // Insert the memory aliasing node
1180 set_all_memory(reset_memory());
1181 }
1182 assert(merged_memory(), "");
1183
1184 // Now add the locals which are initially bound to arguments:
1185 uint arg_size = tf()->domain()->cnt();
1186 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1187 for (i = TypeFunc::Parms; i < arg_size; i++) {
1188 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1189 }
1190
1191 // Clear out the rest of the map (locals and stack)
1192 for (i = arg_size; i < len; i++) {
1193 map()->init_req(i, top());
1194 }
1195
1196 SafePointNode* entry_map = stop();
1197 return entry_map;
1198 }
1199
1200 //-----------------------------do_method_entry--------------------------------
1201 // Emit any code needed in the pseudo-block before BCI zero.
1202 // The main thing to do is lock the receiver of a synchronized method.
1203 void Parse::do_method_entry() {
1204 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1205 set_sp(0); // Java Stack Pointer
1239
1240 // If the method is synchronized, we need to construct a lock node, attach
1241 // it to the Start node, and pin it there.
1242 if (method()->is_synchronized()) {
1243 // Insert a FastLockNode right after the Start which takes as arguments
1244 // the current thread pointer, the "this" pointer & the address of the
1245 // stack slot pair used for the lock. The "this" pointer is a projection
1246 // off the start node, but the locking spot has to be constructed by
1247 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1248 // becomes the second argument to the FastLockNode call. The
1249 // FastLockNode becomes the new control parent to pin it to the start.
1250
1251 // Setup Object Pointer
1252 Node *lock_obj = nullptr;
1253 if (method()->is_static()) {
1254 ciInstance* mirror = _method->holder()->java_mirror();
1255 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1256 lock_obj = makecon(t_lock);
1257 } else { // Else pass the "this" pointer,
1258 lock_obj = local(0); // which is Parm0 from StartNode
1259 }
1260 // Clear out dead values from the debug info.
1261 kill_dead_locals();
1262 // Build the FastLockNode
1263 _synch_lock = shared_lock(lock_obj);
1264 }
1265
1266 // Feed profiling data for parameters to the type system so it can
1267 // propagate it as speculative types
1268 record_profiled_parameters_for_speculation();
1269 }
1270
1271 //------------------------------init_blocks------------------------------------
1272 // Initialize our parser map to contain the types/monitors at method entry.
1273 void Parse::init_blocks() {
1274 // Create the blocks.
1275 _block_count = flow()->block_count();
1276 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1277
1278 // Initialize the structs.
1672 //--------------------handle_missing_successor---------------------------------
1673 void Parse::handle_missing_successor(int target_bci) {
1674 #ifndef PRODUCT
1675 Block* b = block();
1676 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1677 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1678 #endif
1679 ShouldNotReachHere();
1680 }
1681
1682 //--------------------------merge_common---------------------------------------
1683 void Parse::merge_common(Parse::Block* target, int pnum) {
1684 if (TraceOptoParse) {
1685 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1686 }
1687
1688 // Zap extra stack slots to top
1689 assert(sp() == target->start_sp(), "");
1690 clean_stack(sp());
1691
1692 if (!target->is_merged()) { // No prior mapping at this bci
1693 if (TraceOptoParse) { tty->print(" with empty state"); }
1694
1695 // If this path is dead, do not bother capturing it as a merge.
1696 // It is "as if" we had 1 fewer predecessors from the beginning.
1697 if (stopped()) {
1698 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1699 return;
1700 }
1701
1702 // Make a region if we know there are multiple or unpredictable inputs.
1703 // (Also, if this is a plain fall-through, we might see another region,
1704 // which must not be allowed into this block's map.)
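// Typical cases (illustrative): a loop head is reached from its fall-in path
// and from its backedge, and an exception handler can be entered from many
// bcis; both need a RegionNode so per-slot PhiNodes can be grown as further
// predecessors merge in.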
1705 if (pnum > PhiNode::Input // Known multiple inputs.
1706 || target->is_handler() // These have unpredictable inputs.
1707 || target->is_loop_head() // Known multiple inputs
1708 || control()->is_Region()) { // We must hide this guy.
1709
1710 int current_bci = bci();
1711 set_parse_bci(target->start()); // Set target bci
1726 record_for_igvn(r);
1727 // zap all inputs to null for debugging (done in Node(uint) constructor)
1728 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1729 r->init_req(pnum, control());
1730 set_control(r);
1731 target->copy_irreducible_status_to(r, jvms());
1732 set_parse_bci(current_bci); // Restore bci
1733 }
1734
1735 // Convert the existing Parser mapping into a mapping at this bci.
1736 store_state_to(target);
1737 assert(target->is_merged(), "do not come here twice");
1738
1739 } else { // Prior mapping at this bci
1740 if (TraceOptoParse) { tty->print(" with previous state"); }
1741 #ifdef ASSERT
1742 if (target->is_SEL_head()) {
1743 target->mark_merged_backedge(block());
1744 }
1745 #endif
1746 // We must not manufacture more phis if the target is already parsed.
1747 bool nophi = target->is_parsed();
1748
1749 SafePointNode* newin = map();// Hang on to incoming mapping
1750 Block* save_block = block(); // Hang on to incoming block;
1751 load_state_from(target); // Get prior mapping
1752
1753 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1754 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1755 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1756 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1757
1758 // Iterate over my current mapping and the old mapping.
1759 // Where different, insert Phi functions.
1760 // Use any existing Phi functions.
1761 assert(control()->is_Region(), "must be merging to a region");
1762 RegionNode* r = control()->as_Region();
1763
1764 // Compute where to merge into
1765 // Merge incoming control path
1766 r->init_req(pnum, newin->control());
1767
1768 if (pnum == 1) { // Last merge for this Region?
1769 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1770 Node* result = _gvn.transform_no_reclaim(r);
1771 if (r != result && TraceOptoParse) {
1772 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1773 }
1774 }
1775 record_for_igvn(r);
1776 }
1777
1778 // Update all the non-control inputs to map:
1779 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1780 bool check_elide_phi = target->is_SEL_backedge(save_block);
1781 for (uint j = 1; j < newin->req(); j++) {
1782 Node* m = map()->in(j); // Current state of target.
1783 Node* n = newin->in(j); // Incoming change to target state.
1784 PhiNode* phi;
1785 if (m->is_Phi() && m->as_Phi()->region() == r)
1786 phi = m->as_Phi();
1787 else
1788 phi = nullptr;
1789 if (m != n) { // Different; must merge
1790 switch (j) {
1791 // Frame pointer and Return Address never changes
1792 case TypeFunc::FramePtr:// Drop m, use the original value
1793 case TypeFunc::ReturnAdr:
1794 break;
1795 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1796 assert(phi == nullptr, "the merge contains phis, not vice versa");
1797 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1798 continue;
1799 default: // All normal stuff
1800 if (phi == nullptr) {
1801 const JVMState* jvms = map()->jvms();
1802 if (EliminateNestedLocks &&
1803 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1804 // BoxLock nodes are never commoned.
1805 // Use old BoxLock node as merged box.
1806 assert(newin->jvms()->is_monitor_box(j), "sanity");
1807 // This assert also tests that nodes are BoxLock.
1808 assert(BoxLockNode::same_slot(n, m), "sanity");
1809 C->gvn_replace_by(n, m);
1810 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1811 phi = ensure_phi(j, nophi);
1812 }
1813 }
1814 break;
1815 }
1816 }
1817 // At this point, n might be top if:
1818 // - there is no phi (because TypeFlow detected a conflict), or
1819 // - the corresponding control edge is top (a dead incoming path)
1820 // It is a bug if we create a phi which sees a garbage value on a live path.
1821
1822 if (phi != nullptr) {
1823 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1824 assert(phi->region() == r, "");
1825 phi->set_req(pnum, n); // Then add 'n' to the merge
1826 if (pnum == PhiNode::Input) {
1827 // Last merge for this Phi.
1828 // So far, Phis have had a reasonable type from ciTypeFlow.
1829 // Now _gvn will join that with the meet of current inputs.
1830 // BOTTOM is never permissible here, 'cause pessimistically
1831 // Phis of pointers cannot lose the basic pointer type.
1832 debug_only(const Type* bt1 = phi->bottom_type());
1833 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1834 map()->set_req(j, _gvn.transform_no_reclaim(phi));
1835 debug_only(const Type* bt2 = phi->bottom_type());
1836 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1837 record_for_igvn(phi);
1838 }
1839 }
1840 } // End of for all values to be merged
1841
1842 if (pnum == PhiNode::Input &&
1843 !r->in(0)) { // The occasional useless Region
1844 assert(control() == r, "");
1845 set_control(r->nonnull_req());
1846 }
1847
1848 map()->merge_replaced_nodes_with(newin);
1849
1850 // newin has been subsumed into the lazy merge, and is now dead.
1851 set_block(save_block);
1852
1853 stop(); // done with this guy, for now
1854 }
1855
1856 if (TraceOptoParse) {
1857 tty->print_cr(" on path %d", pnum);
1858 }
1859
1860 // Done with this parser state.
1861 assert(stopped(), "");
1862 }
1863
1975
1976 // Add new path to the region.
1977 uint pnum = r->req();
1978 r->add_req(nullptr);
1979
1980 for (uint i = 1; i < map->req(); i++) {
1981 Node* n = map->in(i);
1982 if (i == TypeFunc::Memory) {
1983 // Ensure a phi on all currently known memories.
1984 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1985 Node* phi = mms.memory();
1986 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
1987 assert(phi->req() == pnum, "must be same size as region");
1988 phi->add_req(nullptr);
1989 }
1990 }
1991 } else {
1992 if (n->is_Phi() && n->as_Phi()->region() == r) {
1993 assert(n->req() == pnum, "must be same size as region");
1994 n->add_req(nullptr);
1995 }
1996 }
1997 }
1998
1999 return pnum;
2000 }
2001
2002 //------------------------------ensure_phi-------------------------------------
2003 // Turn the idx'th entry of the current map into a Phi
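// Illustrative: after  if (p) { x = 1; } else { x = 2; }  the two predecessors
// reach the merge with different values in x's slot, so the slot becomes
// Phi(Region, ConI(1), ConI(2)); later predecessors are patched in with
// phi->set_req(pnum, n) from merge_common.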
2004 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2005 SafePointNode* map = this->map();
2006 Node* region = map->control();
2007 assert(region->is_Region(), "");
2008
2009 Node* o = map->in(idx);
2010 assert(o != nullptr, "");
2011
2012 if (o == top()) return nullptr; // TOP always merges into TOP
2013
2014 if (o->is_Phi() && o->as_Phi()->region() == region) {
2015 return o->as_Phi();
2016 }
2017
2018 // Now use a Phi here for merging
2019 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2020 const JVMState* jvms = map->jvms();
2021 const Type* t = nullptr;
2022 if (jvms->is_loc(idx)) {
2023 t = block()->local_type_at(idx - jvms->locoff());
2024 } else if (jvms->is_stk(idx)) {
2025 t = block()->stack_type_at(idx - jvms->stkoff());
2026 } else if (jvms->is_mon(idx)) {
2027 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2028 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2029 } else if ((uint)idx < TypeFunc::Parms) {
2030 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2031 } else {
2032 assert(false, "no type information for this phi");
2033 }
2034
2035 // If the type falls to bottom, then this must be a local that
2036 // is mixing ints and oops or some such. Forcing it to top
2037 // makes it go dead.
2038 if (t == Type::BOTTOM) {
2039 map->set_req(idx, top());
2040 return nullptr;
2041 }
2042
2043 // Do not create phis for top either.
2044 // A top on a non-null control flow must be unused, even after the phi.
2045 if (t == Type::TOP || t == Type::HALF) {
2046 map->set_req(idx, top());
2047 return nullptr;
2048 }
2049
2050 PhiNode* phi = PhiNode::make(region, o, t);
2051 gvn().set_type(phi, t);
2052 if (C->do_escape_analysis()) record_for_igvn(phi);
2053 map->set_req(idx, phi);
2054 return phi;
2055 }
2056
2057 //--------------------------ensure_memory_phi----------------------------------
2058 // Turn the idx'th slice of the current memory into a Phi
2059 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2060 MergeMemNode* mem = merged_memory();
2061 Node* region = control();
2062 assert(region->is_Region(), "");
2063
2064 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2065 assert(o != nullptr && o != top(), "");
2066
2067 PhiNode* phi;
2068 if (o->is_Phi() && o->as_Phi()->region() == region) {
2069 phi = o->as_Phi();
2070 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2071 // clone the shared base memory phi to make a new memory split
2072 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2073 const Type* t = phi->bottom_type();
2074 const TypePtr* adr_type = C->get_adr_type(idx);
2202 Node* chk = _gvn.transform( new CmpINode(opq, profile_state) );
2203 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
2204 // Branch to failure if state was changed
2205 { BuildCutout unless(this, tst, PROB_ALWAYS);
2206 uncommon_trap(Deoptimization::Reason_rtm_state_change,
2207 Deoptimization::Action_make_not_entrant);
2208 }
2209 }
2210 #endif
2211 }
2212
2213 //------------------------------return_current---------------------------------
2214 // Append current _map to _exit_return
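// Every return bytecode funnels through here: control, i/o, each memory slice
// and the (optional) return value are appended as one more input to the
// corresponding exit Region/Phi built in build_exits(); the shared exit state
// later becomes a single ReturnNode in Compile::return_values.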
2215 void Parse::return_current(Node* value) {
2216 if (RegisterFinalizersAtInit &&
2217 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2218 call_register_finalizer();
2219 }
2220
2221 // Do not set_parse_bci, so that return goo is credited to the return insn.
2222 set_bci(InvocationEntryBci);
2223 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2224 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2225 }
2226 if (C->env()->dtrace_method_probes()) {
2227 make_dtrace_method_exit(method());
2228 }
2229 SafePointNode* exit_return = _exits.map();
2230 exit_return->in( TypeFunc::Control )->add_req( control() );
2231 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2232 Node *mem = exit_return->in( TypeFunc::Memory );
2233 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2234 if (mms.is_empty()) {
2235 // get a copy of the base memory, and patch just this one input
2236 const TypePtr* adr_type = mms.adr_type(C);
2237 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2238 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2239 gvn().set_type_bottom(phi);
2240 phi->del_req(phi->req()-1); // prepare to re-patch
2241 mms.set_memory(phi);
2242 }
2243 mms.memory()->add_req(mms.memory2());
2244 }
2245
2246 // frame pointer is always same, already captured
2247 if (value != nullptr) {
2248 // If returning an oop where the declared return type is an interface, the
2249 // Verifier allows a silent, free cast from oop to interface. Make it
2250 // explicit here.
2251 Node* phi = _exits.argument(0);
2252 phi->add_req(value);
2253 }
2254
2255 if (_first_return) {
2256 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2257 _first_return = false;
2258 } else {
2259 _exits.map()->merge_replaced_nodes_with(map());
2260 }
2261
2262 stop_and_kill_map(); // This CFG path dies here
2263 }
2264
2265
2266 //------------------------------add_safepoint----------------------------------
2267 void Parse::add_safepoint() {
2268 uint parms = TypeFunc::Parms+1;
2269
2270 // Clear out dead values from the debug info.
2271 kill_dead_locals();
2272
2273 // Clone the JVM State
2274 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
|
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "interpreter/linkResolver.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/method.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/c2compiler.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/convertnode.hpp"
34 #include "opto/idealGraphPrinter.hpp"
35 #include "opto/inlinetypenode.hpp"
36 #include "opto/locknode.hpp"
37 #include "opto/memnode.hpp"
38 #include "opto/opaquenode.hpp"
39 #include "opto/parse.hpp"
40 #include "opto/rootnode.hpp"
41 #include "opto/runtime.hpp"
42 #include "opto/type.hpp"
43 #include "runtime/handles.inline.hpp"
44 #include "runtime/safepointMechanism.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/bitMap.inline.hpp"
47 #include "utilities/copy.hpp"
48
49 // Static array so we can figure out which bytecodes stop us from compiling
50 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
51 // and eventually should be encapsulated in a proper class (gri 8/18/98).
52
53 #ifndef PRODUCT
54 uint nodes_created = 0;
55 uint methods_parsed = 0;
87 }
88 if (all_null_checks_found) {
89 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
90 (100*implicit_null_checks)/all_null_checks_found);
91 }
92 if (SharedRuntime::_implicit_null_throws) {
93 tty->print_cr("%u implicit null exceptions at runtime",
94 SharedRuntime::_implicit_null_throws);
95 }
96
97 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
98 BytecodeParseHistogram::print();
99 }
100 }
101 #endif
102
103 //------------------------------ON STACK REPLACEMENT---------------------------
104
105 // Construct a node which can be used to get incoming state for
106 // on stack replacement.
107 Node* Parse::fetch_interpreter_state(int index,
108 const Type* type,
109 Node* local_addrs,
110 Node* local_addrs_base) {
111 BasicType bt = type->basic_type();
112 if (type == TypePtr::NULL_PTR) {
113 // Ptr types are mixed together with T_ADDRESS but nullptr is
114 // really for T_OBJECT types so correct it.
115 bt = T_OBJECT;
116 }
117 Node *mem = memory(Compile::AliasIdxRaw);
118 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
119 Node *ctl = control();
120
121 // Very similar to LoadNode::make, except we handle un-aligned longs and
122 // doubles on Sparc. Intel can handle them just fine directly.
123 Node *l = nullptr;
124 switch (bt) { // Signature is flattened
125 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
126 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
127 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
128 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
129 case T_LONG:
130 case T_DOUBLE: {
131 // Since arguments are in reverse order, the argument address 'adr'
132 // refers to the back half of the long/double. Recompute adr.
133 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
134 if (Matcher::misaligned_doubles_ok) {
135 l = (bt == T_DOUBLE)
136 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
137 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
138 } else {
139 l = (bt == T_DOUBLE)
140 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
141 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
142 }
143 break;
144 }
145 default: ShouldNotReachHere();
146 }
147 return _gvn.transform(l);
148 }
149
150 // Helper routine to prevent the interpreter from handing
151 // unexpected typestate to an OSR method.
152 // The Node l is a value newly dug out of the interpreter frame.
153 // The type is the type predicted by ciTypeFlow. Note that it is
154 // not a general type, but can only come from Type::get_typeflow_type.
155 // The safepoint is a map which will feed an uncommon trap.
156 Node* Parse::check_interpreter_type(Node* l, const Type* type,
157 SafePointNode* &bad_type_exit) {
158 const TypeOopPtr* tp = type->isa_oopptr();
159
160 // TypeFlow may assert null-ness if a type appears unloaded.
161 if (type == TypePtr::NULL_PTR ||
162 (tp != nullptr && !tp->is_loaded())) {
163 // Value must be null, not a real oop.
164 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
165 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
166 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
167 set_control(_gvn.transform( new IfTrueNode(iff) ));
168 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
169 bad_type_exit->control()->add_req(bad_type);
170 l = null();
171 }
172
173 // Typeflow can also cut off paths from the CFG, based on
174 // types which appear unloaded, or call sites which appear unlinked.
175 // When paths are cut off, values at later merge points can rise
176 // toward more specific classes. Make sure these specific classes
177 // are still in effect.
178 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
179 // TypeFlow asserted a specific object type. Value must have that type.
180 Node* bad_type_ctrl = nullptr;
181 if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
182 // Check inline types for null here to prevent checkcast from adding an
183 // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
184 l = null_check_oop(l, &bad_type_ctrl);
185 bad_type_exit->control()->add_req(bad_type_ctrl);
186 }
187 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
188 bad_type_exit->control()->add_req(bad_type_ctrl);
189 }
190
191 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
192 return l;
193 }
194
195 // Helper routine which sets up elements of the initial parser map when
196 // performing a parse for on stack replacement. Add values into map.
197 // The only parameter contains the address of the interpreter arguments (the OSR buffer).
198 void Parse::load_interpreter_state(Node* osr_buf) {
199 int index;
200 int max_locals = jvms()->loc_size();
201 int max_stack = jvms()->stk_size();
202
203 // Mismatch between method and jvms can occur since map briefly held
204 // an OSR entry state (which takes up one RawPtr word).
205 assert(max_locals == method()->max_locals(), "sanity");
206 assert(max_stack >= method()->max_stack(), "sanity");
207 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
208 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
209
210 // Find the start block.
211 Block* osr_block = start_block();
212 assert(osr_block->start() == osr_bci(), "sanity");
213
214 // Set initial BCI.
215 set_parse_bci(osr_block->start());
216
217 // Set initial stack depth.
218 set_sp(osr_block->start_sp());
219
220 // Check bailouts. We currently do not perform on stack replacement
221 // of loops in catch blocks or loops which branch with a non-empty stack.
222 if (sp() != 0) {
223 C->record_method_not_compilable("OSR starts with non-empty stack");
224 return;
225 }
226 // Do not OSR inside finally clauses:
227 if (osr_block->has_trap_at(osr_block->start())) {
228 assert(false, "OSR starts with an immediate trap");
229 C->record_method_not_compilable("OSR starts with an immediate trap");
230 return;
231 }
232
233 // Commute monitors from interpreter frame to compiler frame.
234 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
235 int mcnt = osr_block->flow()->monitor_count();
236 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
237 for (index = 0; index < mcnt; index++) {
238 // Make a BoxLockNode for the monitor.
239 Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
240
241 // Displaced headers and locked objects are interleaved in the
242 // temp OSR buffer. We only copy the locked objects out here.
243 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
244 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
245 // Try to copy the displaced header to the BoxNode
246 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
247
248 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
249
250 // Build a bogus FastLockNode (no code will be generated) and push the
251 // monitor into our debug info.
252 const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
253 map()->push_monitor(flock);
254
255 // If the lock is our method synchronization lock, tuck it away in
256 // _sync_lock for return and rethrow exit paths.
257 if (index == 0 && method()->is_synchronized()) {
258 _synch_lock = flock;
259 }
260 }
261
262 // Use the raw liveness computation to make sure that unexpected
263 // values don't propagate into the OSR frame.
264 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
265 if (!live_locals.is_valid()) {
266 // Degenerate or breakpointed method.
294 if (C->log() != nullptr) {
295 C->log()->elem("OSR_mismatch local_index='%d'",index);
296 }
297 set_local(index, null());
298 // and ignore it for the loads
299 continue;
300 }
301 }
302
303 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
304 if (type == Type::TOP || type == Type::HALF) {
305 continue;
306 }
307 // If the type falls to bottom, then this must be a local that
308 // is mixing ints and oops or some such. Forcing it to top
309 // makes it go dead.
310 if (type == Type::BOTTOM) {
311 continue;
312 }
313 // Construct code to access the appropriate local.
314 Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
315 set_local(index, value);
316 }
317
318 // Extract the needed stack entries from the interpreter frame.
319 for (index = 0; index < sp(); index++) {
320 const Type *type = osr_block->stack_type_at(index);
321 if (type != Type::TOP) {
322 // Currently the compiler bails out when attempting to on stack replace
323 // at a bci with a non-empty stack. We should not reach here.
324 ShouldNotReachHere();
325 }
326 }
327
328 // End the OSR migration
329 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
330 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
331 "OSR_migration_end", TypeRawPtr::BOTTOM,
332 osr_buf);
333
334 // Now that the interpreter state is loaded, make sure it will match
497 log->elem("observe that='has_exception_handlers'");
498 }
499
500 assert(InlineTree::check_can_parse(method()) == nullptr, "Can not parse this method, cutout earlier");
501 assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
502
503 // Always register dependence if JVMTI is enabled, because
504 // either breakpoint setting or hotswapping of methods may
505 // cause deoptimization.
506 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
507 C->dependencies()->assert_evol_method(method());
508 }
509
510 NOT_PRODUCT(methods_seen++);
511
512 // Do some special top-level things.
513 if (depth() == 1 && C->is_osr_compilation()) {
514 _entry_bci = C->entry_bci();
515 _flow = method()->get_osr_flow_analysis(osr_bci());
516 if (_flow->failing()) {
517 // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
518 // can lead to this. Re-enable once 8284443 is fixed.
519 // assert(false, "type flow analysis failed for OSR compilation");
520 C->record_method_not_compilable(_flow->failure_reason());
521 #ifndef PRODUCT
522 if (PrintOpto && (Verbose || WizardMode)) {
523 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
524 if (Verbose) {
525 method()->print();
526 method()->print_codes();
527 _flow->print();
528 }
529 }
530 #endif
531 }
532 _tf = C->tf(); // the OSR entry type is different
533 }
534
535 #ifdef ASSERT
536 if (depth() == 1) {
537 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
538 } else {
539 assert(!this->is_osr_parse(), "no recursive OSR");
587 do_method_entry();
588 }
589
590 if (depth() == 1 && !failing()) {
591 if (C->clinit_barrier_on_entry()) {
592 // Add check to deoptimize the nmethod once the holder class is fully initialized
593 clinit_deopt();
594 }
595
596 // Add check to deoptimize the nmethod if RTM state was changed
597 rtm_deopt();
598 }
599
600 // Check for bailouts during method entry or RTM state check setup.
601 if (failing()) {
602 if (log) log->done("parse");
603 C->set_default_node_notes(caller_nn);
604 return;
605 }
606
607 // Handle inline type arguments
608 int arg_size = method()->arg_size();
609 for (int i = 0; i < arg_size; i++) {
610 Node* parm = local(i);
611 const Type* t = _gvn.type(parm);
612 if (t->is_inlinetypeptr()) {
613 // Create InlineTypeNode from the oop and replace the parameter
614 bool is_larval = (i == 0) && method()->is_object_constructor() && !method()->holder()->is_abstract() && !method()->holder()->is_java_lang_Object();
615 Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass(), !t->maybe_null(), is_larval);
616 replace_in_map(parm, vt);
617 } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && method()->has_vararg() &&
618 t->isa_aryptr() != nullptr && !t->is_aryptr()->is_null_free() && !t->is_aryptr()->is_not_null_free()) {
619 // Speculate on varargs Object array being not null-free (and therefore also not flat)
620 const TypePtr* spec_type = t->speculative();
621 spec_type = (spec_type != nullptr && spec_type->isa_aryptr() != nullptr) ? spec_type : t->is_aryptr();
622 spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free();
623 spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
624 Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
625 // TODO 8325106 Shouldn't we use replace_in_map here?
626 set_local(i, cast);
627 }
628 }
629
630 entry_map = map(); // capture any changes performed by method setup code
631 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
632
633 // We begin parsing as if we have just encountered a jump to the
634 // method entry.
635 Block* entry_block = start_block();
636 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
637 set_map_clone(entry_map);
638 merge_common(entry_block, entry_block->next_path_num());
639
640 #ifndef PRODUCT
641 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
642 set_parse_histogram( parse_histogram_obj );
643 #endif
644
645 // Parse all the basic blocks.
646 do_all_blocks();
647
648 // Check for bailouts during conversion to graph
649 if (failing()) {
795 void Parse::build_exits() {
796 // make a clone of caller to prevent sharing of side-effects
797 _exits.set_map(_exits.clone_map());
798 _exits.clean_stack(_exits.sp());
799 _exits.sync_jvms();
800
801 RegionNode* region = new RegionNode(1);
802 record_for_igvn(region);
803 gvn().set_type_bottom(region);
804 _exits.set_control(region);
805
806 // Note: iophi and memphi are not transformed until do_exits.
807 Node* iophi = new PhiNode(region, Type::ABIO);
808 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
809 gvn().set_type_bottom(iophi);
810 gvn().set_type_bottom(memphi);
811 _exits.set_i_o(iophi);
812 _exits.set_all_memory(memphi);
813
814 // Add a return value to the exit state. (Do not push it yet.)
815 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
816 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
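// Sub-word return values (boolean, char, byte, short) travel as full ints in the
// calling convention, so the declared return type is widened to int here.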
817 if (ret_type->isa_int()) {
818 BasicType ret_bt = method()->return_type()->basic_type();
819 if (ret_bt == T_BOOLEAN ||
820 ret_bt == T_CHAR ||
821 ret_bt == T_BYTE ||
822 ret_bt == T_SHORT) {
823 ret_type = TypeInt::INT;
824 }
825 }
826
827 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
828 // becomes loaded during the subsequent parsing, the loaded and unloaded
829 // types will not join when we transform and push in do_exits().
830 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
831 if (ret_oop_type && !ret_oop_type->is_loaded()) {
832 ret_type = TypeOopPtr::BOTTOM;
833 }
834 int ret_size = type2size[ret_type->basic_type()];
835 Node* ret_phi = new PhiNode(region, ret_type);
836 gvn().set_type_bottom(ret_phi);
837 _exits.ensure_stack(ret_size);
838 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
839 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
840 _exits.set_argument(0, ret_phi); // here is where the parser finds it
841 // Note: ret_phi is not yet pushed, until do_exits.
842 }
843 }
844
845 //----------------------------build_start_state-------------------------------
846 // Construct a state which contains only the incoming arguments from an
847 // unknown caller. The method & bci will be null & InvocationEntryBci.
848 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
849 int arg_size = tf->domain_sig()->cnt();
850 int max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
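// Size the map to the larger of the incoming argument count and the calling-convention
// return count (range_cc), presumably so it can also hold scalarized return values.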
851 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
852 SafePointNode* map = new SafePointNode(max_size, jvms);
853 jvms->set_map(map);
854 record_for_igvn(map);
855 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
856 Node_Notes* old_nn = default_node_notes();
857 if (old_nn != nullptr && has_method()) {
858 Node_Notes* entry_nn = old_nn->clone(this);
859 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
860 entry_jvms->set_offsets(0);
861 entry_jvms->set_bci(entry_bci());
862 entry_nn->set_jvms(entry_jvms);
863 set_default_node_notes(entry_nn);
864 }
865 PhaseGVN& gvn = *initial_gvn();
866 uint i = 0;
867 int arg_num = 0;
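// Note: i walks the signature slots of the domain, j counts the incoming Parm
// projections (a scalarized inline type argument consumes several of them), and
// arg_num counts declared arguments (skipping halves) for the is_scalarized_arg() query.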
868 for (uint j = 0; i < (uint)arg_size; i++) {
869 const Type* t = tf->domain_sig()->field_at(i);
870 Node* parm = nullptr;
871 if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
872 // Inline type arguments are not passed by reference: we get an argument per
873 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
874 GraphKit kit(jvms, &gvn);
875 kit.set_control(map->control());
876 Node* old_mem = map->memory();
877 // Use immutable memory for inline type loads and restore it below
878 kit.set_all_memory(C->immutable_memory());
879 parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
880 map->set_control(kit.control());
881 map->set_memory(old_mem);
882 } else {
883 parm = gvn.transform(new ParmNode(start, j++));
884 }
885 map->init_req(i, parm);
886 // Record all these guys for later GVN.
887 record_for_igvn(parm);
888 if (i >= TypeFunc::Parms && t != Type::HALF) {
889 arg_num++;
890 }
891 }
892 for (; i < map->req(); i++) {
893 map->init_req(i, top());
894 }
895 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
896 set_default_node_notes(old_nn);
897 return jvms;
898 }
899
900 //-----------------------------make_node_notes---------------------------------
901 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
902 if (caller_nn == nullptr) return nullptr;
903 Node_Notes* nn = caller_nn->clone(C);
904 JVMState* caller_jvms = nn->jvms();
905 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
906 jvms->set_offsets(0);
907 jvms->set_bci(_entry_bci);
908 nn->set_jvms(jvms);
909 return nn;
910 }
911
912
913 //--------------------------return_values--------------------------------------
914 void Compile::return_values(JVMState* jvms) {
915 GraphKit kit(jvms);
916 Node* ret = new ReturnNode(TypeFunc::Parms,
917 kit.control(),
918 kit.i_o(),
919 kit.reset_memory(),
920 kit.frameptr(),
921 kit.returnadr());
922 // Add zero or one return value
923 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
924 if (ret_size > 0) {
925 kit.inc_sp(-ret_size); // pop the return value(s)
926 kit.sync_jvms();
927 Node* res = kit.argument(0);
928 if (tf()->returns_inline_type_as_fields()) {
929 // Multiple return values (inline type fields): add as many edges
930 // to the Return node as returned values.
931 InlineTypeNode* vt = res->as_InlineType();
932 ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
933 if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
934 ret->init_req(TypeFunc::Parms, vt);
935 } else {
936 // Return the tagged klass pointer to signal scalarization to the caller
937 Node* tagged_klass = vt->tagged_klass(kit.gvn());
938 // Return null if the inline type is null (IsInit field is not set)
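// (IsInit << 63) >> 63 sign-extends the IsInit bit across all 64 bits: the mask is
// all-ones when the inline type is initialized and zero otherwise, so the AND below
// yields either the tagged klass pointer or null.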
939 Node* conv = kit.gvn().transform(new ConvI2LNode(vt->get_is_init()));
940 Node* shl = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
941 Node* shr = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
942 tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
943 ret->init_req(TypeFunc::Parms, tagged_klass);
944 }
945 uint idx = TypeFunc::Parms + 1;
946 vt->pass_fields(&kit, ret, idx, false, false);
947 } else {
948 ret->add_req(res);
949 // Note: The second dummy edge is not needed by a ReturnNode.
950 }
951 }
952 // bind it to root
953 root()->add_req(ret);
954 record_for_igvn(ret);
955 initial_gvn()->transform_no_reclaim(ret);
956 }
957
958 //------------------------rethrow_exceptions-----------------------------------
959 // Bind all exception states in the list into a single RethrowNode.
960 void Compile::rethrow_exceptions(JVMState* jvms) {
961 GraphKit kit(jvms);
962 if (!kit.has_exceptions()) return; // nothing to generate
963 // Load my combined exception state into the kit, with all phis transformed:
964 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
965 Node* ex_oop = kit.use_exception_state(ex_map);
966 RethrowNode* exit = new RethrowNode(kit.control(),
967 kit.i_o(), kit.reset_memory(),
968 kit.frameptr(), kit.returnadr(),
969 // like a return but with exception input
970 ex_oop);
1054 // to complete, we force all writes to complete.
1055 //
1056 // 2. Experimental VM option is used to force the barrier if any field
1057 // was written out in the constructor.
1058 //
1059 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1060 // support_IRIW_for_not_multiple_copy_atomic_cpu causes MemBarVolatile to be
1061 // emitted before a volatile load instead of after a volatile store, so there
1062 // is no barrier after the store.
1063 // We want to guarantee the same behavior as on platforms with total store
1064 // order, although this is not required by the Java memory model.
1065 // In this case, we want to enforce visibility of volatile field
1066 // initializations which are performed in constructors.
1067 // So as with finals, we add a barrier here.
1068 //
1069 // "All bets are off" unless the first publication occurs after a
1070 // normal return from the constructor. We do not attempt to detect
1071 // such unusual early publications. But no barrier is needed on
1072 // exceptional returns, since they cannot publish normally.
1073 //
1074 if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
1075 (wrote_final() ||
1076 (AlwaysSafeConstructors && wrote_fields()) ||
1077 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1078 _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1079
1080 // If a memory barrier is created for the final field writes
1081 // and the allocation node does not escape the initializer method,
1082 // then the barrier introduced by the allocation node can be removed.
1083 if (DoEscapeAnalysis && alloc_with_final()) {
1084 AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final());
1085 alloc->compute_MemBar_redundancy(method());
1086 }
1087 if (PrintOpto && (Verbose || WizardMode)) {
1088 method()->print_name();
1089 tty->print_cr(" writes finals and needs a memory barrier");
1090 }
1091 }
1092
1093 // Any method can write a @Stable field; insert memory barriers
1094 // after those writes as well. We cannot bind a preceding allocation
1095 // node (if any) to the barrier because the allocation does not always
1096 // dominate the MemBarRelease.
1097 if (wrote_stable()) {
1098 _exits.insert_mem_bar(Op_MemBarRelease);
1099 if (PrintOpto && (Verbose || WizardMode)) {
1100 method()->print_name();
1101 tty->print_cr(" writes @Stable and needs a memory barrier");
1102 }
1103 }
1104
1105 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1106 // transform each slice of the original memphi:
1107 mms.set_memory(_gvn.transform(mms.memory()));
1108 }
1109 // Clean up input MergeMems created by transforming the slices
1110 _gvn.transform(_exits.merged_memory());
1111
1112 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1113 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1114 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1115 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1116 // If the type we set for the ret_phi in build_exits() is too optimistic and
1117 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1118 // loading. It could also be due to an error, so mark this method as not compilable because
1119 // otherwise this could lead to an infinite compile loop.
1120 // In any case, this code path is rarely (and never in my testing) reached.
1121 #ifdef ASSERT
1122 tty->print_cr("# Can't determine return type.");
1123 tty->print_cr("# exit control");
1124 _exits.control()->dump(2);
1125 tty->print_cr("# ret phi type");
1126 _gvn.type(ret_phi)->dump();
1127 tty->print_cr("# ret phi");
1128 ret_phi->dump(2);
1129 #endif // ASSERT
1130 assert(false, "Can't determine return type.");
1131 C->record_method_not_compilable("Can't determine return type.");
1132 return;
1133 }
1197
1198 //-----------------------------create_entry_map-------------------------------
1199 // Initialize our parser map to contain the types at method entry.
1200 // For OSR, the map contains a single RawPtr parameter.
1201 // Initial monitor locking for sync. methods is performed by do_method_entry.
1202 SafePointNode* Parse::create_entry_map() {
1203 // Check for really stupid bail-out cases.
1204 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1205 if (len >= 32760) {
1206 // Bailout expected, this is a very rare edge case.
1207 C->record_method_not_compilable("too many local variables");
1208 return nullptr;
1209 }
1210
1211 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1212 _caller->map()->delete_replaced_nodes();
1213
1214 // If this is an inlined method, we may have to do a receiver null check.
1215 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1216 GraphKit kit(_caller);
1217 Node* receiver = kit.argument(0);
1218 Node* null_free = kit.null_check_receiver_before_call(method());
1219 _caller = kit.transfer_exceptions_into_jvms();
1220 if (receiver->is_InlineType() && receiver->as_InlineType()->is_larval()) {
1221 // Replace the larval inline type receiver in the exit map as well to make sure that
1222 // we can find and update it in Parse::do_call when we are done with the initialization.
1223 _exits.map()->replace_edge(receiver, null_free);
1224 }
1225 if (kit.stopped()) {
1226 _exits.add_exception_states_from(_caller);
1227 _exits.set_jvms(_caller);
1228 return nullptr;
1229 }
1230 }
1231
1232 assert(method() != nullptr, "parser must have a method");
1233
1234 // Create an initial safepoint to hold JVM state during parsing
1235 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1236 set_map(new SafePointNode(len, jvms));
1237 jvms->set_map(map());
1238 record_for_igvn(map());
1239 assert(jvms->endoff() == len, "correct jvms sizing");
1240
1241 SafePointNode* inmap = _caller->map();
1242 assert(inmap != nullptr, "must have inmap");
1243 // In case of null check on receiver above
1244 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1245
1246 uint i;
1247
1248 // Pass thru the predefined input parameters.
1249 for (i = 0; i < TypeFunc::Parms; i++) {
1250 map()->init_req(i, inmap->in(i));
1251 }
1252
1253 if (depth() == 1) {
1254 assert(map()->memory()->Opcode() == Op_Parm, "");
1255 // Insert the memory aliasing node
1256 set_all_memory(reset_memory());
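// set_all_memory() wraps the incoming memory state in a MergeMem so that individual
// alias slices can be updated independently during parsing.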
1257 }
1258 assert(merged_memory(), "");
1259
1260 // Now add the locals which are initially bound to arguments:
1261 uint arg_size = tf()->domain_sig()->cnt();
1262 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1263 for (i = TypeFunc::Parms; i < arg_size; i++) {
1264 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1265 }
1266
1267 // Clear out the rest of the map (locals and stack)
1268 for (i = arg_size; i < len; i++) {
1269 map()->init_req(i, top());
1270 }
1271
1272 SafePointNode* entry_map = stop();
1273 return entry_map;
1274 }
1275
1276 //-----------------------------do_method_entry--------------------------------
1277 // Emit any code needed in the pseudo-block before BCI zero.
1278 // The main thing to do is lock the receiver of a synchronized method.
1279 void Parse::do_method_entry() {
1280 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1281 set_sp(0); // Java Stack Pointer
1315
1316 // If the method is synchronized, we need to construct a lock node, attach
1317 // it to the Start node, and pin it there.
1318 if (method()->is_synchronized()) {
1319 // Insert a FastLockNode right after the Start which takes as arguments
1320 // the current thread pointer, the "this" pointer & the address of the
1321 // stack slot pair used for the lock. The "this" pointer is a projection
1322 // off the start node, but the locking spot has to be constructed by
1323 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1324 // becomes the second argument to the FastLockNode call. The
1325 // FastLockNode becomes the new control parent to pin it to the start.
1326
1327 // Setup Object Pointer
1328 Node *lock_obj = nullptr;
1329 if (method()->is_static()) {
1330 ciInstance* mirror = _method->holder()->java_mirror();
1331 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1332 lock_obj = makecon(t_lock);
1333 } else { // Else pass the "this" pointer,
1334 lock_obj = local(0); // which is Parm0 from StartNode
1335 assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1336 }
1337 // Clear out dead values from the debug info.
1338 kill_dead_locals();
1339 // Build the FastLockNode
1340 _synch_lock = shared_lock(lock_obj);
1341 }
1342
1343 // Feed profiling data for parameters to the type system so it can
1344 // propagate it as speculative types
1345 record_profiled_parameters_for_speculation();
1346 }
1347
1348 //------------------------------init_blocks------------------------------------
1349 // Initialize our parser map to contain the types/monitors at method entry.
1350 void Parse::init_blocks() {
1351 // Create the blocks.
1352 _block_count = flow()->block_count();
1353 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1354
1355 // Initialize the structs.
1749 //--------------------handle_missing_successor---------------------------------
1750 void Parse::handle_missing_successor(int target_bci) {
1751 #ifndef PRODUCT
1752 Block* b = block();
1753 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1754 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1755 #endif
1756 ShouldNotReachHere();
1757 }
1758
1759 //--------------------------merge_common---------------------------------------
1760 void Parse::merge_common(Parse::Block* target, int pnum) {
1761 if (TraceOptoParse) {
1762 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1763 }
1764
1765 // Zap extra stack slots to top
1766 assert(sp() == target->start_sp(), "");
1767 clean_stack(sp());
1768
1769 // Check for merge conflicts involving inline types
1770 JVMState* old_jvms = map()->jvms();
1771 int old_bci = bci();
1772 JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1773 tmp_jvms->set_should_reexecute(true);
1774 tmp_jvms->bind_map(map());
1775 // Execution needs to restart at the next bytecode (the entry of the next
1776 // block).
1777 if (target->is_merged() ||
1778 pnum > PhiNode::Input ||
1779 target->is_handler() ||
1780 target->is_loop_head()) {
1781 set_parse_bci(target->start());
1782 for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1783 Node* n = map()->in(j); // Incoming change to target state.
1784 const Type* t = nullptr;
1785 if (tmp_jvms->is_loc(j)) {
1786 t = target->local_type_at(j - tmp_jvms->locoff());
1787 } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1788 t = target->stack_type_at(j - tmp_jvms->stkoff());
1789 }
1790 if (t != nullptr && t != Type::BOTTOM) {
1791 if (n->is_InlineType() && !t->is_inlinetypeptr()) {
1792 // Allocate inline type in src block to be able to merge it with oop in target block
1793 map()->set_req(j, n->as_InlineType()->buffer(this));
1794 } else if (!n->is_InlineType() && t->is_inlinetypeptr()) {
1795 // Scalarize null in src block to be able to merge it with inline type in target block
1796 assert(gvn().type(n)->is_zero_type(), "Should have been scalarized");
1797 map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
1798 }
1799 }
1800 }
1801 }
1802 old_jvms->bind_map(map());
1803 set_parse_bci(old_bci);
1804
1805 if (!target->is_merged()) { // No prior mapping at this bci
1806 if (TraceOptoParse) { tty->print(" with empty state"); }
1807
1808 // If this path is dead, do not bother capturing it as a merge.
1809 // It is "as if" we had 1 fewer predecessors from the beginning.
1810 if (stopped()) {
1811 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1812 return;
1813 }
1814
1815 // Make a region if we know there are multiple or unpredictable inputs.
1816 // (Also, if this is a plain fall-through, we might see another region,
1817 // which must not be allowed into this block's map.)
1818 if (pnum > PhiNode::Input // Known multiple inputs.
1819 || target->is_handler() // These have unpredictable inputs.
1820 || target->is_loop_head() // Known multiple inputs
1821 || control()->is_Region()) { // We must hide this guy.
1822
1823 int current_bci = bci();
1824 set_parse_bci(target->start()); // Set target bci
1839 record_for_igvn(r);
1840 // zap all inputs to null for debugging (done in Node(uint) constructor)
1841 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1842 r->init_req(pnum, control());
1843 set_control(r);
1844 target->copy_irreducible_status_to(r, jvms());
1845 set_parse_bci(current_bci); // Restore bci
1846 }
1847
1848 // Convert the existing Parser mapping into a mapping at this bci.
1849 store_state_to(target);
1850 assert(target->is_merged(), "do not come here twice");
1851
1852 } else { // Prior mapping at this bci
1853 if (TraceOptoParse) { tty->print(" with previous state"); }
1854 #ifdef ASSERT
1855 if (target->is_SEL_head()) {
1856 target->mark_merged_backedge(block());
1857 }
1858 #endif
1859
1860 // We must not manufacture more phis if the target is already parsed.
1861 bool nophi = target->is_parsed();
1862
1863 SafePointNode* newin = map();// Hang on to incoming mapping
1864 Block* save_block = block(); // Hang on to incoming block;
1865 load_state_from(target); // Get prior mapping
1866
1867 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1868 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1869 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1870 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1871
1872 // Iterate over my current mapping and the old mapping.
1873 // Where different, insert Phi functions.
1874 // Use any existing Phi functions.
1875 assert(control()->is_Region(), "must be merging to a region");
1876 RegionNode* r = control()->as_Region();
1877
1878 // Compute where to merge into
1879 // Merge incoming control path
1880 r->init_req(pnum, newin->control());
1881
1882 if (pnum == 1) { // Last merge for this Region?
1883 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1884 Node* result = _gvn.transform_no_reclaim(r);
1885 if (r != result && TraceOptoParse) {
1886 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1887 }
1888 }
1889 record_for_igvn(r);
1890 }
1891
1892 // Update all the non-control inputs to map:
1893 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1894 bool check_elide_phi = target->is_SEL_backedge(save_block);
1895 bool last_merge = (pnum == PhiNode::Input);
1896 for (uint j = 1; j < newin->req(); j++) {
1897 Node* m = map()->in(j); // Current state of target.
1898 Node* n = newin->in(j); // Incoming change to target state.
1899 PhiNode* phi;
1900 if (m->is_Phi() && m->as_Phi()->region() == r) {
1901 phi = m->as_Phi();
1902 } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1903 phi = m->as_InlineType()->get_oop()->as_Phi();
1904 } else {
1905 phi = nullptr;
1906 }
1907 if (m != n) { // Different; must merge
1908 switch (j) {
1909 // Frame pointer and Return Address never change
1910 case TypeFunc::FramePtr:// Drop m, use the original value
1911 case TypeFunc::ReturnAdr:
1912 break;
1913 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1914 assert(phi == nullptr, "the merge contains phis, not vice versa");
1915 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1916 continue;
1917 default: // All normal stuff
1918 if (phi == nullptr) {
1919 const JVMState* jvms = map()->jvms();
1920 if (EliminateNestedLocks &&
1921 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1922 // BoxLock nodes are not commoned.
1923 // Use old BoxLock node as merged box.
1924 assert(newin->jvms()->is_monitor_box(j), "sanity");
1925 // This assert also tests that nodes are BoxLock.
1926 assert(BoxLockNode::same_slot(n, m), "sanity");
1927 C->gvn_replace_by(n, m);
1928 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1929 phi = ensure_phi(j, nophi);
1930 }
1931 }
1932 break;
1933 }
1934 }
1935 // At this point, n might be top if:
1936 // - there is no phi (because TypeFlow detected a conflict), or
1937 // - the corresponding control edge is top (a dead incoming path)
1938 // It is a bug if we create a phi which sees a garbage value on a live path.
1939
1940 // Merging two inline types?
1941 if (phi != nullptr && phi->bottom_type()->is_inlinetypeptr()) {
1942 // Reload current state because it may have been updated by ensure_phi
1943 m = map()->in(j);
1944 InlineTypeNode* vtm = m->as_InlineType(); // Current inline type
1945 InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
1946 assert(vtm->get_oop() == phi, "Inline type should have Phi input");
1947 if (TraceOptoParse) {
1948 #ifdef ASSERT
1949 tty->print_cr("\nMerging inline types");
1950 tty->print_cr("Current:");
1951 vtm->dump(2);
1952 tty->print_cr("Incoming:");
1953 vtn->dump(2);
1954 tty->cr();
1955 #endif
1956 }
1957 // Do the merge
1958 vtm->merge_with(&_gvn, vtn, pnum, last_merge);
1959 if (last_merge) {
1960 map()->set_req(j, _gvn.transform_no_reclaim(vtm));
1961 record_for_igvn(vtm);
1962 }
1963 } else if (phi != nullptr) {
1964 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1965 assert(phi->region() == r, "");
1966 phi->set_req(pnum, n); // Then add 'n' to the merge
1967 if (last_merge) {
1968 // Last merge for this Phi.
1969 // So far, Phis have had a reasonable type from ciTypeFlow.
1970 // Now _gvn will join that with the meet of current inputs.
1971 // BOTTOM is never permissible here because, pessimistically,
1972 // Phis of pointers cannot lose the basic pointer type.
1973 debug_only(const Type* bt1 = phi->bottom_type());
1974 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1975 map()->set_req(j, _gvn.transform_no_reclaim(phi));
1976 debug_only(const Type* bt2 = phi->bottom_type());
1977 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1978 record_for_igvn(phi);
1979 }
1980 }
1981 } // End of for all values to be merged
1982
1983 if (last_merge && !r->in(0)) { // The occasional useless Region
1984 assert(control() == r, "");
1985 set_control(r->nonnull_req());
1986 }
1987
1988 map()->merge_replaced_nodes_with(newin);
1989
1990 // newin has been subsumed into the lazy merge, and is now dead.
1991 set_block(save_block);
1992
1993 stop(); // done with this guy, for now
1994 }
1995
1996 if (TraceOptoParse) {
1997 tty->print_cr(" on path %d", pnum);
1998 }
1999
2000 // Done with this parser state.
2001 assert(stopped(), "");
2002 }
2003
2115
2116 // Add new path to the region.
2117 uint pnum = r->req();
2118 r->add_req(nullptr);
2119
2120 for (uint i = 1; i < map->req(); i++) {
2121 Node* n = map->in(i);
2122 if (i == TypeFunc::Memory) {
2123 // Ensure a phi on all currently known memories.
2124 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2125 Node* phi = mms.memory();
2126 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2127 assert(phi->req() == pnum, "must be same size as region");
2128 phi->add_req(nullptr);
2129 }
2130 }
2131 } else {
2132 if (n->is_Phi() && n->as_Phi()->region() == r) {
2133 assert(n->req() == pnum, "must be same size as region");
2134 n->add_req(nullptr);
2135 } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2136 n->as_InlineType()->add_new_path(r);
2137 }
2138 }
2139 }
2140
2141 return pnum;
2142 }
2143
2144 //------------------------------ensure_phi-------------------------------------
2145 // Turn the idx'th entry of the current map into a Phi
2146 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2147 SafePointNode* map = this->map();
2148 Node* region = map->control();
2149 assert(region->is_Region(), "");
2150
2151 Node* o = map->in(idx);
2152 assert(o != nullptr, "");
2153
2154 if (o == top()) return nullptr; // TOP always merges into TOP
2155
2156 if (o->is_Phi() && o->as_Phi()->region() == region) {
2157 return o->as_Phi();
2158 }
2159 InlineTypeNode* vt = o->isa_InlineType();
2160 if (vt != nullptr && vt->has_phi_inputs(region)) {
2161 return vt->get_oop()->as_Phi();
2162 }
2163
2164 // Now use a Phi here for merging
2165 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2166 const JVMState* jvms = map->jvms();
2167 const Type* t = nullptr;
2168 if (jvms->is_loc(idx)) {
2169 t = block()->local_type_at(idx - jvms->locoff());
2170 } else if (jvms->is_stk(idx)) {
2171 t = block()->stack_type_at(idx - jvms->stkoff());
2172 } else if (jvms->is_mon(idx)) {
2173 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2174 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2175 } else if ((uint)idx < TypeFunc::Parms) {
2176 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2177 } else {
2178 assert(false, "no type information for this phi");
2179 }
2180
2181 // If the type falls to bottom, then this must be a local that
2182 // is already dead or is mixing ints and oops or some such.
2183 // Forcing it to top makes it go dead.
2184 if (t == Type::BOTTOM) {
2185 map->set_req(idx, top());
2186 return nullptr;
2187 }
2188
2189 // Do not create phis for top either.
2190 // A top on a non-null control flow must be an unused value, even after the phi.
2191 if (t == Type::TOP || t == Type::HALF) {
2192 map->set_req(idx, top());
2193 return nullptr;
2194 }
2195
2196 if (vt != nullptr && t->is_inlinetypeptr()) {
2197 // Inline types are merged by merging their field values.
2198 // Create a cloned InlineTypeNode with phi inputs that
2199 // represents the merged inline type and update the map.
2200 // TODO 8325106 Why can't we pass map here?
2201 vt = vt->clone_with_phis(&_gvn, region);
2202 map->set_req(idx, vt);
2203 return vt->get_oop()->as_Phi();
2204 } else {
2205 PhiNode* phi = PhiNode::make(region, o, t);
2206 gvn().set_type(phi, t);
2207 if (C->do_escape_analysis()) record_for_igvn(phi);
2208 map->set_req(idx, phi);
2209 return phi;
2210 }
2211 }
2212
2213 //--------------------------ensure_memory_phi----------------------------------
2214 // Turn the idx'th slice of the current memory into a Phi
2215 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2216 MergeMemNode* mem = merged_memory();
2217 Node* region = control();
2218 assert(region->is_Region(), "");
2219
2220 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2221 assert(o != nullptr && o != top(), "");
2222
2223 PhiNode* phi;
2224 if (o->is_Phi() && o->as_Phi()->region() == region) {
2225 phi = o->as_Phi();
2226 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2227 // clone the shared base memory phi to make a new memory split
2228 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2229 const Type* t = phi->bottom_type();
2230 const TypePtr* adr_type = C->get_adr_type(idx);
2358 Node* chk = _gvn.transform( new CmpINode(opq, profile_state) );
2359 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
2360 // Branch to failure if state was changed
2361 { BuildCutout unless(this, tst, PROB_ALWAYS);
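// Inside the BuildCutout scope, control follows the unlikely path where the test
// fails (i.e. the RTM state changed), so the trap below is only taken on that path;
// normal control resumes after the scope.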
2362 uncommon_trap(Deoptimization::Reason_rtm_state_change,
2363 Deoptimization::Action_make_not_entrant);
2364 }
2365 }
2366 #endif
2367 }
2368
2369 //------------------------------return_current---------------------------------
2370 // Append current _map to _exit_return
2371 void Parse::return_current(Node* value) {
2372 if (RegisterFinalizersAtInit &&
2373 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2374 call_register_finalizer();
2375 }
2376
2377 // Do not set_parse_bci, so that return goo is credited to the return insn.
2378 // vreturn can trigger an allocation so vreturn can throw. Setting
2379 // the bci here breaks exception handling. Commenting this out
2380 // doesn't seem to break anything.
2381 // set_bci(InvocationEntryBci);
2382 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2383 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2384 }
2385 if (C->env()->dtrace_method_probes()) {
2386 make_dtrace_method_exit(method());
2387 }
2388 // frame pointer is always same, already captured
2389 if (value != nullptr) {
2390 Node* phi = _exits.argument(0);
2391 const Type* return_type = phi->bottom_type();
2392 const TypeInstPtr* tr = return_type->isa_instptr();
2393 assert(!value->is_InlineType() || !value->as_InlineType()->is_larval(), "returning a larval");
2394 if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2395 return_type->is_inlinetypeptr()) {
2396 // Inline type is returned as fields, make sure it is scalarized
2397 if (!value->is_InlineType()) {
2398 value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass(), false);
2399 }
2400 if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2401 // Returning from root or an incrementally inlined method. Make sure all non-flat
2402 // fields are buffered and re-execute if allocation triggers deoptimization.
2403 PreserveReexecuteState preexecs(this);
2404 assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2405 jvms()->set_should_reexecute(true);
2406 inc_sp(1);
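// With should_reexecute set and the stack pointer bumped, a deoptimization during
// buffering re-executes the return bytecode with the value back on the stack.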
2407 value = value->as_InlineType()->allocate_fields(this);
2408 }
2409 } else if (value->is_InlineType()) {
2410 // Inline type is returned as oop, make sure it is buffered and re-execute
2411 // if allocation triggers deoptimization.
2412 PreserveReexecuteState preexecs(this);
2413 jvms()->set_should_reexecute(true);
2414 inc_sp(1);
2415 value = value->as_InlineType()->buffer(this);
2416 }
2417 // ...else
2418 // If returning oops to an interface-return, there is a silent free
2419 // cast from oop to interface allowed by the Verifier. Make it explicit here.
2420 phi->add_req(value);
2421 }
2422
2423 SafePointNode* exit_return = _exits.map();
2424 exit_return->in( TypeFunc::Control )->add_req( control() );
2425 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2426 Node *mem = exit_return->in( TypeFunc::Memory );
2427 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2428 if (mms.is_empty()) {
2429 // get a copy of the base memory, and patch just this one input
2430 const TypePtr* adr_type = mms.adr_type(C);
2431 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2432 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2433 gvn().set_type_bottom(phi);
2434 phi->del_req(phi->req()-1); // prepare to re-patch
2435 mms.set_memory(phi);
2436 }
2437 mms.memory()->add_req(mms.memory2());
2438 }
2439
2440 if (_first_return) {
2441 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2442 _first_return = false;
2443 } else {
2444 _exits.map()->merge_replaced_nodes_with(map());
2445 }
2446
2447 stop_and_kill_map(); // This CFG path dies here
2448 }
2449
2450
2451 //------------------------------add_safepoint----------------------------------
2452 void Parse::add_safepoint() {
2453 uint parms = TypeFunc::Parms+1;
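// One extra input slot beyond TypeFunc::Parms is reserved; it later receives the
// safepoint polling address.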
2454
2455 // Clear out dead values from the debug info.
2456 kill_dead_locals();
2457
2458 // Clone the JVM State
2459 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);