13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "interpreter/linkResolver.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/method.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/c2compiler.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/idealGraphPrinter.hpp"
34 #include "opto/locknode.hpp"
35 #include "opto/memnode.hpp"
36 #include "opto/opaquenode.hpp"
37 #include "opto/parse.hpp"
38 #include "opto/rootnode.hpp"
39 #include "opto/runtime.hpp"
40 #include "opto/type.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "utilities/bitMap.inline.hpp"
45 #include "utilities/copy.hpp"
46
47 // Static array so we can figure out which bytecodes stop us from compiling
48 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
49 // and eventually should be encapsulated in a proper class (gri 8/18/98).
50
51 #ifndef PRODUCT
52 uint nodes_created = 0;
53 uint methods_parsed = 0;
85 }
86 if (all_null_checks_found) {
87 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
88 (100*implicit_null_checks)/all_null_checks_found);
89 }
90 if (SharedRuntime::_implicit_null_throws) {
91 tty->print_cr("%u implicit null exceptions at runtime",
92 SharedRuntime::_implicit_null_throws);
93 }
94
95 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
96 BytecodeParseHistogram::print();
97 }
98 }
99 #endif
100
101 //------------------------------ON STACK REPLACEMENT---------------------------
102
103 // Construct a node which can be used to get incoming state for
104 // on stack replacement.
105 Node *Parse::fetch_interpreter_state(int index,
106 BasicType bt,
107 Node *local_addrs,
108 Node *local_addrs_base) {
109 Node *mem = memory(Compile::AliasIdxRaw);
110 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
111 Node *ctl = control();
112
113 // Very similar to LoadNode::make, except we handle un-aligned longs and
114 // doubles on Sparc. Intel can handle them just fine directly.
115 Node *l = nullptr;
116 switch (bt) { // Signature is flattened
117 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
118 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
119 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
120 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
121 case T_LONG:
122 case T_DOUBLE: {
123 // Since arguments are in reverse order, the argument address 'adr'
124 // refers to the back half of the long/double. Recompute adr.
125 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
126 if (Matcher::misaligned_doubles_ok) {
127 l = (bt == T_DOUBLE)
128            ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
129            : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
130 } else {
131 l = (bt == T_DOUBLE)
132 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
133 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
134 }
135 break;
136 }
137 default: ShouldNotReachHere();
138 }
139 return _gvn.transform(l);
140 }
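// Worked example of the address arithmetic above (a sketch, assuming a
// 64-bit VM where wordSize == 8): local #3 is loaded from
//   local_addrs_base + local_addrs - 3*8
// while a long/double occupying locals #3/#4 is loaded as one two-word
// access from local_addrs_base + local_addrs - 4*8, because the
// interpreter lays slots out at decreasing addresses and 'adr' initially
// points at the back half of the pair.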
141
142 // Helper routine to prevent the interpreter from handing
143 // unexpected typestate to an OSR method.
144 // The Node l is a value newly dug out of the interpreter frame.
145 // The type is the type predicted by ciTypeFlow. Note that it is
146 // not a general type, but can only come from Type::get_typeflow_type.
147 // The safepoint is a map which will feed an uncommon trap.
148 Node* Parse::check_interpreter_type(Node* l, const Type* type,
149 SafePointNode* &bad_type_exit) {
150
151 const TypeOopPtr* tp = type->isa_oopptr();
152
153 // TypeFlow may assert null-ness if a type appears unloaded.
154 if (type == TypePtr::NULL_PTR ||
155 (tp != nullptr && !tp->is_loaded())) {
156 // Value must be null, not a real oop.
157 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
158 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
159 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
160 set_control(_gvn.transform( new IfTrueNode(iff) ));
161 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
162 bad_type_exit->control()->add_req(bad_type);
163 l = null();
164 }
165
166 // Typeflow can also cut off paths from the CFG, based on
167 // types which appear unloaded, or call sites which appear unlinked.
168 // When paths are cut off, values at later merge points can rise
169 // toward more specific classes. Make sure these specific classes
170 // are still in effect.
171 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
172 // TypeFlow asserted a specific object type. Value must have that type.
173 Node* bad_type_ctrl = nullptr;
174 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
175 bad_type_exit->control()->add_req(bad_type_ctrl);
176 }
177
178 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
179 return l;
180 }
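// Usage note: every failing path above is appended as an input of
// bad_type_exit->control(), a Region the caller later feeds into a single
// uncommon trap, so one trap site covers all mistyped OSR locals and
// stack slots.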
181
182 // Helper routine which sets up elements of the initial parser map when
183 // performing a parse for on stack replacement. Add values into map.
184 // The only parameter contains the address of the interpreter's arguments.
185 void Parse::load_interpreter_state(Node* osr_buf) {
186 int index;
187 int max_locals = jvms()->loc_size();
188 int max_stack = jvms()->stk_size();
189
190
191 // Mismatch between method and jvms can occur since map briefly held
192 // an OSR entry state (which takes up one RawPtr word).
193 assert(max_locals == method()->max_locals(), "sanity");
194 assert(max_stack >= method()->max_stack(), "sanity");
195 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
196 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
197
198 // Find the start block.
199 Block* osr_block = start_block();
200 assert(osr_block->start() == osr_bci(), "sanity");
201
202 // Set initial BCI.
203 set_parse_bci(osr_block->start());
204
205 // Set initial stack depth.
206 set_sp(osr_block->start_sp());
207
208 // Check bailouts. We currently do not perform on stack replacement
209 // of loops in catch blocks or loops which branch with a non-empty stack.
210 if (sp() != 0) {
211     C->record_method_not_compilable("OSR starts with non-empty stack");
212     return;
213 }
214 // Do not OSR inside finally clauses:
215 if (osr_block->has_trap_at(osr_block->start())) {
216 assert(false, "OSR starts with an immediate trap");
217 C->record_method_not_compilable("OSR starts with an immediate trap");
218 return;
219 }
220
221 // Commute monitors from interpreter frame to compiler frame.
222 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
223 int mcnt = osr_block->flow()->monitor_count();
224 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
225 for (index = 0; index < mcnt; index++) {
226 // Make a BoxLockNode for the monitor.
227 Node *box = new BoxLockNode(next_monitor());
228 // Check for bailout after new BoxLockNode
229 if (failing()) { return; }
230 box = _gvn.transform(box);
231
232
233 // Displaced headers and locked objects are interleaved in the
234 // temp OSR buffer. We only copy the locked objects out here.
235 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
236 Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
237 // Try and copy the displaced header to the BoxNode
238 Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
239
240
241 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
242
243 // Build a bogus FastLockNode (no code will be generated) and push the
244 // monitor into our debug info.
245 const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
246 map()->push_monitor(flock);
247
248 // If the lock is our method synchronization lock, tuck it away in
249 // _sync_lock for return and rethrow exit paths.
250 if (index == 0 && method()->is_synchronized()) {
251 _synch_lock = flock;
252 }
253 }
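// Buffer layout assumed by the loop above: the monitor section of the OSR
// temp buffer holds one {displaced header, locked object} pair per
// monitor; with fetch_interpreter_state's decreasing-address indexing,
// slot index*2 yields the object and slot index*2+1 its displaced header.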
254
255 // Use the raw liveness computation to make sure that unexpected
256 // values don't propagate into the OSR frame.
257 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
258 if (!live_locals.is_valid()) {
259 // Degenerate or breakpointed method.
287 if (C->log() != nullptr) {
288 C->log()->elem("OSR_mismatch local_index='%d'",index);
289 }
290 set_local(index, null());
291 // and ignore it for the loads
292 continue;
293 }
294 }
295
296 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
297 if (type == Type::TOP || type == Type::HALF) {
298 continue;
299 }
300 // If the type falls to bottom, then this must be a local that
301 // is mixing ints and oops or some such. Forcing it to top
302 // makes it go dead.
303 if (type == Type::BOTTOM) {
304 continue;
305 }
306 // Construct code to access the appropriate local.
307 BasicType bt = type->basic_type();
308 if (type == TypePtr::NULL_PTR) {
309 // Ptr types are mixed together with T_ADDRESS but null is
310 // really for T_OBJECT types so correct it.
311 bt = T_OBJECT;
312 }
313 Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
314 set_local(index, value);
315 }
316
317 // Extract the needed stack entries from the interpreter frame.
318 for (index = 0; index < sp(); index++) {
319 const Type *type = osr_block->stack_type_at(index);
320 if (type != Type::TOP) {
321 // Currently the compiler bails out when attempting to on stack replace
322 // at a bci with a non-empty stack. We should not reach here.
323 ShouldNotReachHere();
324 }
325 }
326
327 // End the OSR migration
328 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
329 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
330 "OSR_migration_end", TypeRawPtr::BOTTOM,
331 osr_buf);
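// SharedRuntime::OSR_migration_end is a leaf runtime call that releases
// the temp buffer allocated by OSR_migration_begin; from this point the
// interpreter frame's state survives only in the nodes loaded above.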
332
333 // Now that the interpreter state is loaded, make sure it will match
489 // either breakpoint setting or hotswapping of methods may
490 // cause deoptimization.
491 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
492 C->dependencies()->assert_evol_method(method());
493 }
494
495 NOT_PRODUCT(methods_seen++);
496
497 // Do some special top-level things.
498 if (depth() == 1 && C->is_osr_compilation()) {
499 _tf = C->tf(); // the OSR entry type is different
500 _entry_bci = C->entry_bci();
501 _flow = method()->get_osr_flow_analysis(osr_bci());
502 } else {
503 _tf = TypeFunc::make(method());
504 _entry_bci = InvocationEntryBci;
505 _flow = method()->get_flow_analysis();
506 }
507
508 if (_flow->failing()) {
509 assert(false, "type flow analysis failed during parsing");
510 C->record_method_not_compilable(_flow->failure_reason());
511 #ifndef PRODUCT
512 if (PrintOpto && (Verbose || WizardMode)) {
513 if (is_osr_parse()) {
514 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
515 } else {
516 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
517 }
518 if (Verbose) {
519 method()->print();
520 method()->print_codes();
521 _flow->print();
522 }
523 }
524 #endif
525 }
526
527 #ifdef ASSERT
528 if (depth() == 1) {
529 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
583 do_method_entry();
584 }
585
586 if (depth() == 1 && !failing()) {
587 if (C->clinit_barrier_on_entry()) {
588 // Add check to deoptimize the nmethod once the holder class is fully initialized
589 clinit_deopt();
590 }
591
592 // Add check to deoptimize the nmethod if RTM state was changed
593 rtm_deopt();
594 }
595
596 // Check for bailouts during method entry or RTM state check setup.
597 if (failing()) {
598 if (log) log->done("parse");
599 C->set_default_node_notes(caller_nn);
600 return;
601 }
602
603 entry_map = map(); // capture any changes performed by method setup code
604 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
605
606 // We begin parsing as if we have just encountered a jump to the
607 // method entry.
608 Block* entry_block = start_block();
609 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
610 set_map_clone(entry_map);
611 merge_common(entry_block, entry_block->next_path_num());
612
613 #ifndef PRODUCT
614 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
615 set_parse_histogram( parse_histogram_obj );
616 #endif
617
618 // Parse all the basic blocks.
619 do_all_blocks();
620
621 // Check for bailouts during conversion to graph
622 if (failing()) {
768 void Parse::build_exits() {
769 // make a clone of caller to prevent sharing of side-effects
770 _exits.set_map(_exits.clone_map());
771 _exits.clean_stack(_exits.sp());
772 _exits.sync_jvms();
773
774 RegionNode* region = new RegionNode(1);
775 record_for_igvn(region);
776 gvn().set_type_bottom(region);
777 _exits.set_control(region);
778
779 // Note: iophi and memphi are not transformed until do_exits.
780 Node* iophi = new PhiNode(region, Type::ABIO);
781 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
782 gvn().set_type_bottom(iophi);
783 gvn().set_type_bottom(memphi);
784 _exits.set_i_o(iophi);
785 _exits.set_all_memory(memphi);
786
787 // Add a return value to the exit state. (Do not push it yet.)
788 if (tf()->range()->cnt() > TypeFunc::Parms) {
789 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
790 if (ret_type->isa_int()) {
791 BasicType ret_bt = method()->return_type()->basic_type();
792 if (ret_bt == T_BOOLEAN ||
793 ret_bt == T_CHAR ||
794 ret_bt == T_BYTE ||
795 ret_bt == T_SHORT) {
796 ret_type = TypeInt::INT;
797 }
798 }
799
800 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
801 // becomes loaded during the subsequent parsing, the loaded and unloaded
802 // types will not join when we transform and push in do_exits().
803 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
804 if (ret_oop_type && !ret_oop_type->is_loaded()) {
805 ret_type = TypeOopPtr::BOTTOM;
806 }
807 int ret_size = type2size[ret_type->basic_type()];
808 Node* ret_phi = new PhiNode(region, ret_type);
809 gvn().set_type_bottom(ret_phi);
810 _exits.ensure_stack(ret_size);
811 assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
812 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
813 _exits.set_argument(0, ret_phi); // here is where the parser finds it
814 // Note: ret_phi is not yet pushed, until do_exits.
815 }
816 }
817
818
819 //----------------------------build_start_state-------------------------------
820 // Construct a state which contains only the incoming arguments from an
821 // unknown caller. The method & bci will be null & InvocationEntryBci.
822 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
823 int arg_size = tf->domain()->cnt();
824 int max_size = MAX2(arg_size, (int)tf->range()->cnt());
825 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
826 SafePointNode* map = new SafePointNode(max_size, jvms);
827 record_for_igvn(map);
828 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
829 Node_Notes* old_nn = default_node_notes();
830 if (old_nn != nullptr && has_method()) {
831 Node_Notes* entry_nn = old_nn->clone(this);
832 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
833 entry_jvms->set_offsets(0);
834 entry_jvms->set_bci(entry_bci());
835 entry_nn->set_jvms(entry_jvms);
836 set_default_node_notes(entry_nn);
837 }
838 uint i;
839 for (i = 0; i < (uint)arg_size; i++) {
840 Node* parm = initial_gvn()->transform(new ParmNode(start, i));
841 map->init_req(i, parm);
842 // Record all these guys for later GVN.
843 record_for_igvn(parm);
844 }
845 for (; i < map->req(); i++) {
846 map->init_req(i, top());
847 }
848 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
849 set_default_node_notes(old_nn);
850 jvms->set_map(map);
851 return jvms;
852 }
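// Shape of the resulting map (a sketch, for a static method (II)I):
//   map->in(0 .. TypeFunc::Parms-1) = control/io/memory/frameptr/retadr
//   map->in(TypeFunc::Parms + 0)    = ParmNode for the first int arg
//   map->in(TypeFunc::Parms + 1)    = ParmNode for the second int arg
// with any remaining slots up to max_size initialized to top().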
853
854 //-----------------------------make_node_notes---------------------------------
855 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
856 if (caller_nn == nullptr) return nullptr;
857 Node_Notes* nn = caller_nn->clone(C);
858 JVMState* caller_jvms = nn->jvms();
859 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
860 jvms->set_offsets(0);
861 jvms->set_bci(_entry_bci);
862 nn->set_jvms(jvms);
863 return nn;
864 }
865
866
867 //--------------------------return_values--------------------------------------
868 void Compile::return_values(JVMState* jvms) {
869 GraphKit kit(jvms);
870 Node* ret = new ReturnNode(TypeFunc::Parms,
871 kit.control(),
872 kit.i_o(),
873 kit.reset_memory(),
874 kit.frameptr(),
875 kit.returnadr());
876   // Add zero or one return value.
877 int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
878 if (ret_size > 0) {
879 kit.inc_sp(-ret_size); // pop the return value(s)
880 kit.sync_jvms();
881 ret->add_req(kit.argument(0));
882 // Note: The second dummy edge is not needed by a ReturnNode.
883 }
884 // bind it to root
885 root()->add_req(ret);
886 record_for_igvn(ret);
887 initial_gvn()->transform(ret);
888 }
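// The graph tail built here is Return(ctl, i_o, mem, frameptr, retadr
// [, result]) hung off the Root node; keeping every exit as a Root input
// is what makes all exits reachable from the root for later phases.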
889
890 //------------------------rethrow_exceptions-----------------------------------
891 // Bind all exception states in the list into a single RethrowNode.
892 void Compile::rethrow_exceptions(JVMState* jvms) {
893 GraphKit kit(jvms);
894 if (!kit.has_exceptions()) return; // nothing to generate
895 // Load my combined exception state into the kit, with all phis transformed:
896 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
897 Node* ex_oop = kit.use_exception_state(ex_map);
898 RethrowNode* exit = new RethrowNode(kit.control(),
899 kit.i_o(), kit.reset_memory(),
900 kit.frameptr(), kit.returnadr(),
901 // like a return but with exception input
902 ex_oop);
986 // to complete, we force all writes to complete.
987 //
988 // 2. Experimental VM option is used to force the barrier if any field
989 // was written out in the constructor.
990 //
991 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
992 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
993 // MemBarVolatile is used before volatile load instead of after volatile
994 // store, so there's no barrier after the store.
995 // We want to guarantee the same behavior as on platforms with total store
996 // order, although this is not required by the Java memory model.
997 // In this case, we want to enforce visibility of volatile field
998 // initializations which are performed in constructors.
999 // So as with finals, we add a barrier here.
1000 //
1001 // "All bets are off" unless the first publication occurs after a
1002 // normal return from the constructor. We do not attempt to detect
1003 // such unusual early publications. But no barrier is needed on
1004 // exceptional returns, since they cannot publish normally.
1005 //
1006 if (method()->is_initializer() &&
1007 (wrote_final() ||
1008 (AlwaysSafeConstructors && wrote_fields()) ||
1009 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1010 _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1011
1012 // If Memory barrier is created for final fields write
1013 // and allocation node does not escape the initialize method,
1014 // then barrier introduced by allocation node can be removed.
1015 if (DoEscapeAnalysis && alloc_with_final()) {
1016 AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final());
1017 alloc->compute_MemBar_redundancy(method());
1018 }
1019 if (PrintOpto && (Verbose || WizardMode)) {
1020 method()->print_name();
1021 tty->print_cr(" writes finals and needs a memory barrier");
1022 }
1023 }
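// Publication pattern being protected (a hedged Java sketch):
//   class C { final int x; C() { x = 42; } }
//   writer thread: shared = new C();
//   reader thread: C c = shared; if (c != null) use(c.x); // must see 42
// The MemBarRelease keeps the field stores from floating past the
// publication of the new object on the normal constructor exit.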
1024
1025 // Any method can write a @Stable field; insert memory barriers
1026 // after those also. Can't bind predecessor allocation node (if any)
1027 // with barrier because allocation doesn't always dominate
1028 // MemBarRelease.
1029 if (wrote_stable()) {
1030 _exits.insert_mem_bar(Op_MemBarRelease);
1031 if (PrintOpto && (Verbose || WizardMode)) {
1032 method()->print_name();
1033 tty->print_cr(" writes @Stable and needs a memory barrier");
1034 }
1035 }
1036
1037 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1038 // transform each slice of the original memphi:
1039 mms.set_memory(_gvn.transform(mms.memory()));
1040 }
1041 // Clean up input MergeMems created by transforming the slices
1042 _gvn.transform(_exits.merged_memory());
1043
1044 if (tf()->range()->cnt() > TypeFunc::Parms) {
1045 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1046 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1047 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1048 // If the type we set for the ret_phi in build_exits() is too optimistic and
1049 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1050 // loading. It could also be due to an error, so mark this method as not compilable because
1051 // otherwise this could lead to an infinite compile loop.
1052 // In any case, this code path is rarely (and never in my testing) reached.
1053 #ifdef ASSERT
1054 tty->print_cr("# Can't determine return type.");
1055 tty->print_cr("# exit control");
1056 _exits.control()->dump(2);
1057 tty->print_cr("# ret phi type");
1058 _gvn.type(ret_phi)->dump();
1059 tty->print_cr("# ret phi");
1060 ret_phi->dump(2);
1061 #endif // ASSERT
1062 assert(false, "Can't determine return type.");
1063 C->record_method_not_compilable("Can't determine return type.");
1064 return;
1065 }
1129
1130 //-----------------------------create_entry_map-------------------------------
1131 // Initialize our parser map to contain the types at method entry.
1132 // For OSR, the map contains a single RawPtr parameter.
1133 // Initial monitor locking for sync. methods is performed by do_method_entry.
1134 SafePointNode* Parse::create_entry_map() {
1135 // Check for really stupid bail-out cases.
1136 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1137 if (len >= 32760) {
1138 // Bailout expected, this is a very rare edge case.
1139 C->record_method_not_compilable("too many local variables");
1140 return nullptr;
1141 }
1142
1143 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1144 _caller->map()->delete_replaced_nodes();
1145
1146 // If this is an inlined method, we may have to do a receiver null check.
1147 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1148 GraphKit kit(_caller);
1149 kit.null_check_receiver_before_call(method());
1150 _caller = kit.transfer_exceptions_into_jvms();
1151 if (kit.stopped()) {
1152 _exits.add_exception_states_from(_caller);
1153 _exits.set_jvms(_caller);
1154 return nullptr;
1155 }
1156 }
1157
1158 assert(method() != nullptr, "parser must have a method");
1159
1160 // Create an initial safepoint to hold JVM state during parsing
1161 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1162 set_map(new SafePointNode(len, jvms));
1163 jvms->set_map(map());
1164 record_for_igvn(map());
1165 assert(jvms->endoff() == len, "correct jvms sizing");
1166
1167 SafePointNode* inmap = _caller->map();
1168 assert(inmap != nullptr, "must have inmap");
1169 // In case of null check on receiver above
1170 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1171
1172 uint i;
1173
1174 // Pass thru the predefined input parameters.
1175 for (i = 0; i < TypeFunc::Parms; i++) {
1176 map()->init_req(i, inmap->in(i));
1177 }
1178
1179 if (depth() == 1) {
1180 assert(map()->memory()->Opcode() == Op_Parm, "");
1181 // Insert the memory aliasing node
1182 set_all_memory(reset_memory());
1183 }
1184 assert(merged_memory(), "");
1185
1186 // Now add the locals which are initially bound to arguments:
1187 uint arg_size = tf()->domain()->cnt();
1188 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1189 for (i = TypeFunc::Parms; i < arg_size; i++) {
1190 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1191 }
1192
1193 // Clear out the rest of the map (locals and stack)
1194 for (i = arg_size; i < len; i++) {
1195 map()->init_req(i, top());
1196 }
1197
1198 SafePointNode* entry_map = stop();
1199 return entry_map;
1200 }
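// Resulting JVMS layout (a sketch): slots [0, TypeFunc::Parms) carry the
// fixed inputs copied from the caller; [locoff, stkoff) holds the locals,
// arguments first and top() for the rest; [stkoff, endoff) is the empty
// expression stack, all top(); hence the endoff == len assertion above.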
1201
1202 //-----------------------------do_method_entry--------------------------------
1203 // Emit any code needed in the pseudo-block before BCI zero.
1204 // The main thing to do is lock the receiver of a synchronized method.
1205 void Parse::do_method_entry() {
1206 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1207 set_sp(0); // Java Stack Pointer
1241
1242 // If the method is synchronized, we need to construct a lock node, attach
1243 // it to the Start node, and pin it there.
1244 if (method()->is_synchronized()) {
1245 // Insert a FastLockNode right after the Start which takes as arguments
1246 // the current thread pointer, the "this" pointer & the address of the
1247 // stack slot pair used for the lock. The "this" pointer is a projection
1248 // off the start node, but the locking spot has to be constructed by
1249 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1250 // becomes the second argument to the FastLockNode call. The
1251 // FastLockNode becomes the new control parent to pin it to the start.
1252
1253 // Setup Object Pointer
1254 Node *lock_obj = nullptr;
1255 if (method()->is_static()) {
1256 ciInstance* mirror = _method->holder()->java_mirror();
1257 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1258 lock_obj = makecon(t_lock);
1259 } else { // Else pass the "this" pointer,
1260 lock_obj = local(0); // which is Parm0 from StartNode
1261 }
1262 // Clear out dead values from the debug info.
1263 kill_dead_locals();
1264 // Build the FastLockNode
1265 _synch_lock = shared_lock(lock_obj);
1266 // Check for bailout in shared_lock
1267 if (failing()) { return; }
1268 }
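// Net effect (a sketch): this mirrors the interpreter's handling of a
// synchronized method entry, roughly
//   monitorenter(method()->is_static() ? holder.class : this);
// with the lock word living in the stack slot managed by the BoxLockNode.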
1269
1270 // Feed profiling data for parameters to the type system so it can
1271 // propagate it as speculative types
1272 record_profiled_parameters_for_speculation();
1273 }
1274
1275 //------------------------------init_blocks------------------------------------
1276 // Initialize our parser map to contain the types/monitors at method entry.
1277 void Parse::init_blocks() {
1278 // Create the blocks.
1279 _block_count = flow()->block_count();
1280 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1676 //--------------------handle_missing_successor---------------------------------
1677 void Parse::handle_missing_successor(int target_bci) {
1678 #ifndef PRODUCT
1679 Block* b = block();
1680 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1681 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1682 #endif
1683 ShouldNotReachHere();
1684 }
1685
1686 //--------------------------merge_common---------------------------------------
1687 void Parse::merge_common(Parse::Block* target, int pnum) {
1688 if (TraceOptoParse) {
1689 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1690 }
1691
1692 // Zap extra stack slots to top
1693 assert(sp() == target->start_sp(), "");
1694 clean_stack(sp());
1695
1696 if (!target->is_merged()) { // No prior mapping at this bci
1697 if (TraceOptoParse) { tty->print(" with empty state"); }
1698
1699 // If this path is dead, do not bother capturing it as a merge.
1700 // It is "as if" we had 1 fewer predecessors from the beginning.
1701 if (stopped()) {
1702 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1703 return;
1704 }
1705
1706 // Make a region if we know there are multiple or unpredictable inputs.
1707 // (Also, if this is a plain fall-through, we might see another region,
1708 // which must not be allowed into this block's map.)
1709 if (pnum > PhiNode::Input // Known multiple inputs.
1710 || target->is_handler() // These have unpredictable inputs.
1711 || target->is_loop_head() // Known multiple inputs
1712 || control()->is_Region()) { // We must hide this guy.
1713
1714 int current_bci = bci();
1715 set_parse_bci(target->start()); // Set target bci
1730 record_for_igvn(r);
1731 // zap all inputs to null for debugging (done in Node(uint) constructor)
1732 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1733 r->init_req(pnum, control());
1734 set_control(r);
1735 target->copy_irreducible_status_to(r, jvms());
1736 set_parse_bci(current_bci); // Restore bci
1737 }
1738
1739 // Convert the existing Parser mapping into a mapping at this bci.
1740 store_state_to(target);
1741 assert(target->is_merged(), "do not come here twice");
1742
1743 } else { // Prior mapping at this bci
1744 if (TraceOptoParse) { tty->print(" with previous state"); }
1745 #ifdef ASSERT
1746 if (target->is_SEL_head()) {
1747 target->mark_merged_backedge(block());
1748 }
1749 #endif
1750 // We must not manufacture more phis if the target is already parsed.
1751 bool nophi = target->is_parsed();
1752
1753 SafePointNode* newin = map();// Hang on to incoming mapping
1754 Block* save_block = block(); // Hang on to incoming block;
1755 load_state_from(target); // Get prior mapping
1756
1757 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1758 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1759 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1760 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1761
1762 // Iterate over my current mapping and the old mapping.
1763 // Where different, insert Phi functions.
1764 // Use any existing Phi functions.
1765 assert(control()->is_Region(), "must be merging to a region");
1766 RegionNode* r = control()->as_Region();
1767
1768 // Compute where to merge into
1769 // Merge incoming control path
1770 r->init_req(pnum, newin->control());
1771
1772 if (pnum == 1) { // Last merge for this Region?
1773 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1774 Node* result = _gvn.transform(r);
1775 if (r != result && TraceOptoParse) {
1776 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1777 }
1778 }
1779 record_for_igvn(r);
1780 }
1781
1782 // Update all the non-control inputs to map:
1783 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1784 bool check_elide_phi = target->is_SEL_backedge(save_block);
1785 for (uint j = 1; j < newin->req(); j++) {
1786 Node* m = map()->in(j); // Current state of target.
1787 Node* n = newin->in(j); // Incoming change to target state.
1788 PhiNode* phi;
1789 if (m->is_Phi() && m->as_Phi()->region() == r)
1790 phi = m->as_Phi();
1791 else
1792 phi = nullptr;
1793 if (m != n) { // Different; must merge
1794 switch (j) {
1795 // Frame pointer and Return Address never changes
1796 case TypeFunc::FramePtr:// Drop m, use the original value
1797 case TypeFunc::ReturnAdr:
1798 break;
1799 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1800 assert(phi == nullptr, "the merge contains phis, not vice versa");
1801 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1802 continue;
1803 default: // All normal stuff
1804 if (phi == nullptr) {
1805 const JVMState* jvms = map()->jvms();
1806 if (EliminateNestedLocks &&
1807 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1808              // BoxLock nodes are not commoned.
1809 // Use old BoxLock node as merged box.
1810 assert(newin->jvms()->is_monitor_box(j), "sanity");
1811 // This assert also tests that nodes are BoxLock.
1812 assert(BoxLockNode::same_slot(n, m), "sanity");
1813 C->gvn_replace_by(n, m);
1814 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1815 phi = ensure_phi(j, nophi);
1816 }
1817 }
1818 break;
1819 }
1820 }
1821 // At this point, n might be top if:
1822 // - there is no phi (because TypeFlow detected a conflict), or
1823     // - the corresponding control edge is top (a dead incoming path)
1824 // It is a bug if we create a phi which sees a garbage value on a live path.
1825
1826 if (phi != nullptr) {
1827 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1828 assert(phi->region() == r, "");
1829 phi->set_req(pnum, n); // Then add 'n' to the merge
1830 if (pnum == PhiNode::Input) {
1831 // Last merge for this Phi.
1832 // So far, Phis have had a reasonable type from ciTypeFlow.
1833 // Now _gvn will join that with the meet of current inputs.
1834 // BOTTOM is never permissible here, 'cause pessimistically
1835 // Phis of pointers cannot lose the basic pointer type.
1836 debug_only(const Type* bt1 = phi->bottom_type());
1837 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1838 map()->set_req(j, _gvn.transform(phi));
1839 debug_only(const Type* bt2 = phi->bottom_type());
1840 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1841 record_for_igvn(phi);
1842 }
1843 }
1844 } // End of for all values to be merged
1845
1846 if (pnum == PhiNode::Input &&
1847 !r->in(0)) { // The occasional useless Region
1848 assert(control() == r, "");
1849 set_control(r->nonnull_req());
1850 }
1851
1852 map()->merge_replaced_nodes_with(newin);
1853
1854 // newin has been subsumed into the lazy merge, and is now dead.
1855 set_block(save_block);
1856
1857 stop(); // done with this guy, for now
1858 }
1859
1860 if (TraceOptoParse) {
1861 tty->print_cr(" on path %d", pnum);
1862 }
1863
1864 // Done with this parser state.
1865 assert(stopped(), "");
1866 }
1867
1979
1980 // Add new path to the region.
1981 uint pnum = r->req();
1982 r->add_req(nullptr);
1983
1984 for (uint i = 1; i < map->req(); i++) {
1985 Node* n = map->in(i);
1986 if (i == TypeFunc::Memory) {
1987 // Ensure a phi on all currently known memories.
1988 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1989 Node* phi = mms.memory();
1990 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
1991 assert(phi->req() == pnum, "must be same size as region");
1992 phi->add_req(nullptr);
1993 }
1994 }
1995 } else {
1996 if (n->is_Phi() && n->as_Phi()->region() == r) {
1997 assert(n->req() == pnum, "must be same size as region");
1998 n->add_req(nullptr);
1999 }
2000 }
2001 }
2002
2003 return pnum;
2004 }
2005
2006 //------------------------------ensure_phi-------------------------------------
2007 // Turn the idx'th entry of the current map into a Phi
2008 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2009 SafePointNode* map = this->map();
2010 Node* region = map->control();
2011 assert(region->is_Region(), "");
2012
2013 Node* o = map->in(idx);
2014 assert(o != nullptr, "");
2015
2016 if (o == top()) return nullptr; // TOP always merges into TOP
2017
2018 if (o->is_Phi() && o->as_Phi()->region() == region) {
2019 return o->as_Phi();
2020 }
2021
2022 // Now use a Phi here for merging
2023 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2024 const JVMState* jvms = map->jvms();
2025 const Type* t = nullptr;
2026 if (jvms->is_loc(idx)) {
2027 t = block()->local_type_at(idx - jvms->locoff());
2028 } else if (jvms->is_stk(idx)) {
2029 t = block()->stack_type_at(idx - jvms->stkoff());
2030 } else if (jvms->is_mon(idx)) {
2031 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2032 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2033 } else if ((uint)idx < TypeFunc::Parms) {
2034 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2035 } else {
2036 assert(false, "no type information for this phi");
2037 }
2038
2039 // If the type falls to bottom, then this must be a local that
2040 // is mixing ints and oops or some such. Forcing it to top
2041 // makes it go dead.
2042 if (t == Type::BOTTOM) {
2043 map->set_req(idx, top());
2044 return nullptr;
2045 }
2046
2047 // Do not create phis for top either.
2048   // A top on a non-null control path must remain unused even after the phi.
2049 if (t == Type::TOP || t == Type::HALF) {
2050 map->set_req(idx, top());
2051 return nullptr;
2052 }
2053
2054 PhiNode* phi = PhiNode::make(region, o, t);
2055 gvn().set_type(phi, t);
2056 if (C->do_escape_analysis()) record_for_igvn(phi);
2057 map->set_req(idx, phi);
2058 return phi;
2059 }
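// Example (hedged): if local #2 holds ConI(0) in the current map and a
// new path brings ConI(1), ensure_phi replaces the map entry with
// PhiNode(region, TypeInt::INT) seeded with the old value on existing
// inputs; merge_common then fills slot pnum with the incoming ConI(1).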
2060
2061 //--------------------------ensure_memory_phi----------------------------------
2062 // Turn the idx'th slice of the current memory into a Phi
2063 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2064 MergeMemNode* mem = merged_memory();
2065 Node* region = control();
2066 assert(region->is_Region(), "");
2067
2068 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2069 assert(o != nullptr && o != top(), "");
2070
2071 PhiNode* phi;
2072 if (o->is_Phi() && o->as_Phi()->region() == region) {
2073 phi = o->as_Phi();
2074 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2075 // clone the shared base memory phi to make a new memory split
2076 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2077 const Type* t = phi->bottom_type();
2078 const TypePtr* adr_type = C->get_adr_type(idx);
2206 Node* chk = _gvn.transform( new CmpINode(opq, profile_state) );
2207 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
2208 // Branch to failure if state was changed
2209 { BuildCutout unless(this, tst, PROB_ALWAYS);
2210 uncommon_trap(Deoptimization::Reason_rtm_state_change,
2211 Deoptimization::Action_make_not_entrant);
2212 }
2213 }
2214 #endif
2215 }
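// Shape of the guard built above (a sketch): effectively
//   if (Opaque(rtm_state) != profile_state) uncommon_trap(not_entrant);
// where the Opaque node keeps GVN from folding the comparison away before
// the final RTM state is known.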
2216
2217 //------------------------------return_current---------------------------------
2218 // Append current _map to _exit_return
2219 void Parse::return_current(Node* value) {
2220 if (RegisterFinalizersAtInit &&
2221 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2222 call_register_finalizer();
2223 }
2224
2225 // Do not set_parse_bci, so that return goo is credited to the return insn.
2226 set_bci(InvocationEntryBci);
2227 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2228 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2229 }
2230 if (C->env()->dtrace_method_probes()) {
2231 make_dtrace_method_exit(method());
2232 }
2233 SafePointNode* exit_return = _exits.map();
2234 exit_return->in( TypeFunc::Control )->add_req( control() );
2235 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2236 Node *mem = exit_return->in( TypeFunc::Memory );
2237 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2238 if (mms.is_empty()) {
2239 // get a copy of the base memory, and patch just this one input
2240 const TypePtr* adr_type = mms.adr_type(C);
2241 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2242 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2243 gvn().set_type_bottom(phi);
2244 phi->del_req(phi->req()-1); // prepare to re-patch
2245 mms.set_memory(phi);
2246 }
2247 mms.memory()->add_req(mms.memory2());
2248 }
2249
2250 // frame pointer is always same, already captured
2251 if (value != nullptr) {
2252 // If returning oops to an interface-return, there is a silent free
2253 // cast from oop to interface allowed by the Verifier. Make it explicit
2254 // here.
2255 Node* phi = _exits.argument(0);
2256 phi->add_req(value);
2257 }
2258
2259 if (_first_return) {
2260 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2261 _first_return = false;
2262 } else {
2263 _exits.map()->merge_replaced_nodes_with(map());
2264 }
2265
2266 stop_and_kill_map(); // This CFG path dies here
2267 }
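// Note: a method with several return bytecodes reaches return_current once
// per return; each visit appends one more input to the exit Region and its
// i/o, memory, and result phis, yielding the single merged exit state that
// do_exits() finalizes.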
2268
2269
2270 //------------------------------add_safepoint----------------------------------
2271 void Parse::add_safepoint() {
2272 uint parms = TypeFunc::Parms+1;
2273
2274 // Clear out dead values from the debug info.
2275 kill_dead_locals();
2276
2277 // Clone the JVM State
2278 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "interpreter/linkResolver.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/method.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/c2compiler.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/convertnode.hpp"
34 #include "opto/idealGraphPrinter.hpp"
35 #include "opto/inlinetypenode.hpp"
36 #include "opto/locknode.hpp"
37 #include "opto/memnode.hpp"
38 #include "opto/opaquenode.hpp"
39 #include "opto/parse.hpp"
40 #include "opto/rootnode.hpp"
41 #include "opto/runtime.hpp"
42 #include "opto/type.hpp"
43 #include "runtime/handles.inline.hpp"
44 #include "runtime/safepointMechanism.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/bitMap.inline.hpp"
47 #include "utilities/copy.hpp"
48
49 // Static array so we can figure out which bytecodes stop us from compiling
50 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
51 // and eventually should be encapsulated in a proper class (gri 8/18/98).
52
53 #ifndef PRODUCT
54 uint nodes_created = 0;
55 uint methods_parsed = 0;
87 }
88 if (all_null_checks_found) {
89 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
90 (100*implicit_null_checks)/all_null_checks_found);
91 }
92 if (SharedRuntime::_implicit_null_throws) {
93 tty->print_cr("%u implicit null exceptions at runtime",
94 SharedRuntime::_implicit_null_throws);
95 }
96
97 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
98 BytecodeParseHistogram::print();
99 }
100 }
101 #endif
102
103 //------------------------------ON STACK REPLACEMENT---------------------------
104
105 // Construct a node which can be used to get incoming state for
106 // on stack replacement.
107 Node* Parse::fetch_interpreter_state(int index,
108 const Type* type,
109 Node* local_addrs,
110 Node* local_addrs_base) {
111 BasicType bt = type->basic_type();
112 if (type == TypePtr::NULL_PTR) {
113 // Ptr types are mixed together with T_ADDRESS but nullptr is
114 // really for T_OBJECT types so correct it.
115 bt = T_OBJECT;
116 }
117 Node *mem = memory(Compile::AliasIdxRaw);
118 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
119 Node *ctl = control();
120
121 // Very similar to LoadNode::make, except we handle un-aligned longs and
122 // doubles on Sparc. Intel can handle them just fine directly.
123 Node *l = nullptr;
124 switch (bt) { // Signature is flattened
125 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
126 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
127 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
128 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
129 case T_LONG:
130 case T_DOUBLE: {
131 // Since arguments are in reverse order, the argument address 'adr'
132 // refers to the back half of the long/double. Recompute adr.
133 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
134 if (Matcher::misaligned_doubles_ok) {
135 l = (bt == T_DOUBLE)
136            ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
137            : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
138 } else {
139 l = (bt == T_DOUBLE)
140 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
141 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
142 }
143 break;
144 }
145 default: ShouldNotReachHere();
146 }
147 return _gvn.transform(l);
148 }
149
150 // Helper routine to prevent the interpreter from handing
151 // unexpected typestate to an OSR method.
152 // The Node l is a value newly dug out of the interpreter frame.
153 // The type is the type predicted by ciTypeFlow. Note that it is
154 // not a general type, but can only come from Type::get_typeflow_type.
155 // The safepoint is a map which will feed an uncommon trap.
156 Node* Parse::check_interpreter_type(Node* l, const Type* type,
157 SafePointNode* &bad_type_exit) {
158 const TypeOopPtr* tp = type->isa_oopptr();
159
160 // TypeFlow may assert null-ness if a type appears unloaded.
161 if (type == TypePtr::NULL_PTR ||
162 (tp != nullptr && !tp->is_loaded())) {
163 // Value must be null, not a real oop.
164 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
165 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
166 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
167 set_control(_gvn.transform( new IfTrueNode(iff) ));
168 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
169 bad_type_exit->control()->add_req(bad_type);
170 l = null();
171 }
172
173 // Typeflow can also cut off paths from the CFG, based on
174 // types which appear unloaded, or call sites which appear unlinked.
175 // When paths are cut off, values at later merge points can rise
176 // toward more specific classes. Make sure these specific classes
177 // are still in effect.
178 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
179 // TypeFlow asserted a specific object type. Value must have that type.
180 Node* bad_type_ctrl = nullptr;
181 if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
182 // TODO 8325106 Dead code?
183 // Check inline types for null here to prevent checkcast from adding an
184 // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
185 l = null_check_oop(l, &bad_type_ctrl);
186 bad_type_exit->control()->add_req(bad_type_ctrl);
187 }
188 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
189 bad_type_exit->control()->add_req(bad_type_ctrl);
190 }
191
192 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
193 return l;
194 }
195
196 // Helper routine which sets up elements of the initial parser map when
197 // performing a parse for on stack replacement. Add values into map.
198 // The only parameter contains the address of the interpreter's arguments.
199 void Parse::load_interpreter_state(Node* osr_buf) {
200 int index;
201 int max_locals = jvms()->loc_size();
202 int max_stack = jvms()->stk_size();
203
204 // Mismatch between method and jvms can occur since map briefly held
205 // an OSR entry state (which takes up one RawPtr word).
206 assert(max_locals == method()->max_locals(), "sanity");
207 assert(max_stack >= method()->max_stack(), "sanity");
208 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
209 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
210
211 // Find the start block.
212 Block* osr_block = start_block();
213 assert(osr_block->start() == osr_bci(), "sanity");
214
215 // Set initial BCI.
216 set_parse_bci(osr_block->start());
217
218 // Set initial stack depth.
219 set_sp(osr_block->start_sp());
220
221 // Check bailouts. We currently do not perform on stack replacement
222 // of loops in catch blocks or loops which branch with a non-empty stack.
223 if (sp() != 0) {
224     C->record_method_not_compilable("OSR starts with non-empty stack");
225     return;
226 }
227 // Do not OSR inside finally clauses:
228 if (osr_block->has_trap_at(osr_block->start())) {
229 assert(false, "OSR starts with an immediate trap");
230 C->record_method_not_compilable("OSR starts with an immediate trap");
231 return;
232 }
233
234 // Commute monitors from interpreter frame to compiler frame.
235 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
236 int mcnt = osr_block->flow()->monitor_count();
237 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
238 for (index = 0; index < mcnt; index++) {
239 // Make a BoxLockNode for the monitor.
240 Node *box = new BoxLockNode(next_monitor());
241 // Check for bailout after new BoxLockNode
242 if (failing()) { return; }
243 box = _gvn.transform(box);
244
245 // Displaced headers and locked objects are interleaved in the
246 // temp OSR buffer. We only copy the locked objects out here.
247 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
248 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
249 // Try and copy the displaced header to the BoxNode
250 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
251
252 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
253
254 // Build a bogus FastLockNode (no code will be generated) and push the
255 // monitor into our debug info.
256 const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
257 map()->push_monitor(flock);
258
259 // If the lock is our method synchronization lock, tuck it away in
260 // _sync_lock for return and rethrow exit paths.
261 if (index == 0 && method()->is_synchronized()) {
262 _synch_lock = flock;
263 }
264 }
265
266 // Use the raw liveness computation to make sure that unexpected
267 // values don't propagate into the OSR frame.
268 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
269 if (!live_locals.is_valid()) {
270 // Degenerate or breakpointed method.
298 if (C->log() != nullptr) {
299 C->log()->elem("OSR_mismatch local_index='%d'",index);
300 }
301 set_local(index, null());
302 // and ignore it for the loads
303 continue;
304 }
305 }
306
307 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
308 if (type == Type::TOP || type == Type::HALF) {
309 continue;
310 }
311 // If the type falls to bottom, then this must be a local that
312 // is mixing ints and oops or some such. Forcing it to top
313 // makes it go dead.
314 if (type == Type::BOTTOM) {
315 continue;
316 }
317 // Construct code to access the appropriate local.
318 Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
319 set_local(index, value);
320 }
321
322 // Extract the needed stack entries from the interpreter frame.
323 for (index = 0; index < sp(); index++) {
324 const Type *type = osr_block->stack_type_at(index);
325 if (type != Type::TOP) {
326 // Currently the compiler bails out when attempting to on stack replace
327 // at a bci with a non-empty stack. We should not reach here.
328 ShouldNotReachHere();
329 }
330 }
331
332 // End the OSR migration
333 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
334 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
335 "OSR_migration_end", TypeRawPtr::BOTTOM,
336 osr_buf);
337
338 // Now that the interpreter state is loaded, make sure it will match
494 // either breakpoint setting or hotswapping of methods may
495 // cause deoptimization.
496 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
497 C->dependencies()->assert_evol_method(method());
498 }
499
500 NOT_PRODUCT(methods_seen++);
501
502 // Do some special top-level things.
503 if (depth() == 1 && C->is_osr_compilation()) {
504 _tf = C->tf(); // the OSR entry type is different
505 _entry_bci = C->entry_bci();
506 _flow = method()->get_osr_flow_analysis(osr_bci());
507 } else {
508 _tf = TypeFunc::make(method());
509 _entry_bci = InvocationEntryBci;
510 _flow = method()->get_flow_analysis();
511 }
512
513 if (_flow->failing()) {
514 // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
515 // can lead to this. Re-enable once 8284443 is fixed.
516 //assert(false, "type flow analysis failed during parsing");
517 C->record_method_not_compilable(_flow->failure_reason());
518 #ifndef PRODUCT
519 if (PrintOpto && (Verbose || WizardMode)) {
520 if (is_osr_parse()) {
521 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
522 } else {
523 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
524 }
525 if (Verbose) {
526 method()->print();
527 method()->print_codes();
528 _flow->print();
529 }
530 }
531 #endif
532 }
533
534 #ifdef ASSERT
535 if (depth() == 1) {
536 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
590 do_method_entry();
591 }
592
593 if (depth() == 1 && !failing()) {
594 if (C->clinit_barrier_on_entry()) {
595 // Add check to deoptimize the nmethod once the holder class is fully initialized
596 clinit_deopt();
597 }
598
599 // Add check to deoptimize the nmethod if RTM state was changed
600 rtm_deopt();
601 }
602
603 // Check for bailouts during method entry or RTM state check setup.
604 if (failing()) {
605 if (log) log->done("parse");
606 C->set_default_node_notes(caller_nn);
607 return;
608 }
609
610 // Handle inline type arguments
611 int arg_size = method()->arg_size();
612 for (int i = 0; i < arg_size; i++) {
613 Node* parm = local(i);
614 const Type* t = _gvn.type(parm);
615 if (t->is_inlinetypeptr()) {
616 // Create InlineTypeNode from the oop and replace the parameter
617 bool is_larval = (i == 0) && method()->is_object_constructor() && !method()->holder()->is_abstract() && !method()->holder()->is_java_lang_Object();
618 Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass(), !t->maybe_null(), is_larval);
619 replace_in_map(parm, vt);
620 } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && method()->has_vararg() &&
621 t->isa_aryptr() != nullptr && !t->is_aryptr()->is_null_free() && !t->is_aryptr()->is_not_null_free()) {
622 // Speculate on varargs Object array being not null-free (and therefore also not flat)
623 const TypePtr* spec_type = t->speculative();
624 spec_type = (spec_type != nullptr && spec_type->isa_aryptr() != nullptr) ? spec_type : t->is_aryptr();
625 spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free();
626 spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
627 Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
628 // TODO 8325106 Shouldn't we use replace_in_map here?
629 set_local(i, cast);
630 }
631 }
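// Hedged example (Valhalla semantics assumed): given a value class
//   value class Point { int x; int y; }
// a parameter of type Point is replaced in the map by an InlineTypeNode
// carrying its field values, so later bytecodes can use x and y without
// keeping the oop form of the argument alive.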
632
633 entry_map = map(); // capture any changes performed by method setup code
634 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
635
636 // We begin parsing as if we have just encountered a jump to the
637 // method entry.
638 Block* entry_block = start_block();
639 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
640 set_map_clone(entry_map);
641 merge_common(entry_block, entry_block->next_path_num());
642
643 #ifndef PRODUCT
644 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
645 set_parse_histogram( parse_histogram_obj );
646 #endif
647
648 // Parse all the basic blocks.
649 do_all_blocks();
650
651 // Check for bailouts during conversion to graph
652 if (failing()) {
798 void Parse::build_exits() {
799 // make a clone of caller to prevent sharing of side-effects
800 _exits.set_map(_exits.clone_map());
801 _exits.clean_stack(_exits.sp());
802 _exits.sync_jvms();
803
804 RegionNode* region = new RegionNode(1);
805 record_for_igvn(region);
806 gvn().set_type_bottom(region);
807 _exits.set_control(region);
808
809 // Note: iophi and memphi are not transformed until do_exits.
810 Node* iophi = new PhiNode(region, Type::ABIO);
811 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
812 gvn().set_type_bottom(iophi);
813 gvn().set_type_bottom(memphi);
814 _exits.set_i_o(iophi);
815 _exits.set_all_memory(memphi);
816
817 // Add a return value to the exit state. (Do not push it yet.)
818 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
819 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
820 if (ret_type->isa_int()) {
821 BasicType ret_bt = method()->return_type()->basic_type();
822 if (ret_bt == T_BOOLEAN ||
823 ret_bt == T_CHAR ||
824 ret_bt == T_BYTE ||
825 ret_bt == T_SHORT) {
826 ret_type = TypeInt::INT;
827 }
828 }
829
830 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
831 // becomes loaded during the subsequent parsing, the loaded and unloaded
832 // types will not join when we transform and push in do_exits().
833 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
834 if (ret_oop_type && !ret_oop_type->is_loaded()) {
835 ret_type = TypeOopPtr::BOTTOM;
836 }
837 int ret_size = type2size[ret_type->basic_type()];
838 Node* ret_phi = new PhiNode(region, ret_type);
839 gvn().set_type_bottom(ret_phi);
840 _exits.ensure_stack(ret_size);
841 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
842 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
843 _exits.set_argument(0, ret_phi); // here is where the parser finds it
844 // Note: ret_phi is not yet pushed, until do_exits.
845 }
846 }
847
848 //----------------------------build_start_state-------------------------------
849 // Construct a state which contains only the incoming arguments from an
850 // unknown caller. The method & bci will be null & InvocationEntryBci.
851 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
852 int arg_size = tf->domain_sig()->cnt();
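// Size the start map for whichever is larger: the incoming arguments or the
// (possibly scalarized) return values of the calling convention.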
853 int max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
854 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
855 SafePointNode* map = new SafePointNode(max_size, jvms);
856 jvms->set_map(map);
857 record_for_igvn(map);
858 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
859 Node_Notes* old_nn = default_node_notes();
860 if (old_nn != nullptr && has_method()) {
861 Node_Notes* entry_nn = old_nn->clone(this);
862 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
863 entry_jvms->set_offsets(0);
864 entry_jvms->set_bci(entry_bci());
865 entry_nn->set_jvms(entry_jvms);
866 set_default_node_notes(entry_nn);
867 }
868 PhaseGVN& gvn = *initial_gvn();
869 uint i = 0;
870 int arg_num = 0;
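// Three counters advance in parallel: i indexes the domain signature and the
// map (including the halves of longs/doubles), j indexes the incoming
// ParmNodes (a scalarized inline type argument consumes one ParmNode per
// field, with make_from_multi presumably advancing j past them), and arg_num
// counts declared arguments for the is_scalarized_arg() query.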
871 for (uint j = 0; i < (uint)arg_size; i++) {
872 const Type* t = tf->domain_sig()->field_at(i);
873 Node* parm = nullptr;
874 if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
875 // Inline type arguments are not passed by reference: we get an argument per
876 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
877 GraphKit kit(jvms, &gvn);
878 kit.set_control(map->control());
879 Node* old_mem = map->memory();
880 // Use immutable memory for inline type loads and restore it below
881 kit.set_all_memory(C->immutable_memory());
882 parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
883 map->set_control(kit.control());
884 map->set_memory(old_mem);
885 } else {
886 parm = gvn.transform(new ParmNode(start, j++));
887 }
888 map->init_req(i, parm);
889 // Record all these guys for later GVN.
890 record_for_igvn(parm);
891 if (i >= TypeFunc::Parms && t != Type::HALF) {
892 arg_num++;
893 }
894 }
895 for (; i < map->req(); i++) {
896 map->init_req(i, top());
897 }
898 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
899 set_default_node_notes(old_nn);
900 return jvms;
901 }
902
903 //-----------------------------make_node_notes---------------------------------
904 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
905 if (caller_nn == nullptr) return nullptr;
906 Node_Notes* nn = caller_nn->clone(C);
907 JVMState* caller_jvms = nn->jvms();
908 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
909 jvms->set_offsets(0);
910 jvms->set_bci(_entry_bci);
911 nn->set_jvms(jvms);
912 return nn;
913 }
914
915
916 //--------------------------return_values--------------------------------------
917 void Compile::return_values(JVMState* jvms) {
918 GraphKit kit(jvms);
919 Node* ret = new ReturnNode(TypeFunc::Parms,
920 kit.control(),
921 kit.i_o(),
922 kit.reset_memory(),
923 kit.frameptr(),
924 kit.returnadr());
925 // Add zero or one return value
926 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
927 if (ret_size > 0) {
928 kit.inc_sp(-ret_size); // pop the return value(s)
929 kit.sync_jvms();
930 Node* res = kit.argument(0);
931 if (tf()->returns_inline_type_as_fields()) {
932 // Multiple return values (inline type fields): add as many edges
933 // to the Return node as returned values.
934 InlineTypeNode* vt = res->as_InlineType();
935 ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
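// add_req_batch reserves one Return input per value in the scalarized
// calling convention; init_req and pass_fields below fill them in.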
936 if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
937 ret->init_req(TypeFunc::Parms, vt);
938 } else {
939 // Return the tagged klass pointer to signal scalarization to the caller
940 Node* tagged_klass = vt->tagged_klass(kit.gvn());
941 // Return null if the inline type is null (IsInit field is not set)
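// Sign-extending the IsInit bit across the register ((i2l << 63) >> 63)
// yields an all-ones mask when the value is set and zero when it is null;
// ANDing the tagged klass with that mask produces either the tagged klass
// pointer or null.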
942 Node* conv = kit.gvn().transform(new ConvI2LNode(vt->get_is_init()));
943 Node* shl = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
944 Node* shr = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
945 tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
946 ret->init_req(TypeFunc::Parms, tagged_klass);
947 }
948 uint idx = TypeFunc::Parms + 1;
949 vt->pass_fields(&kit, ret, idx, false, false);
950 } else {
951 ret->add_req(res);
952 // Note: The second dummy edge is not needed by a ReturnNode.
953 }
954 }
955 // bind it to root
956 root()->add_req(ret);
957 record_for_igvn(ret);
958 initial_gvn()->transform(ret);
959 }
960
961 //------------------------rethrow_exceptions-----------------------------------
962 // Bind all exception states in the list into a single RethrowNode.
963 void Compile::rethrow_exceptions(JVMState* jvms) {
964 GraphKit kit(jvms);
965 if (!kit.has_exceptions()) return; // nothing to generate
966 // Load my combined exception state into the kit, with all phis transformed:
967 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
968 Node* ex_oop = kit.use_exception_state(ex_map);
969 RethrowNode* exit = new RethrowNode(kit.control(),
970 kit.i_o(), kit.reset_memory(),
971 kit.frameptr(), kit.returnadr(),
972 // like a return but with exception input
973 ex_oop);
1057 // to complete, we force all writes to complete.
1058 //
1059 // 2. Experimental VM option is used to force the barrier if any field
1060 // was written out in the constructor.
1061 //
1062 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1063 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1064 // MemBarVolatile is used before volatile load instead of after volatile
1065 // store, so there's no barrier after the store.
1066 // We want to guarantee the same behavior as on platforms with total store
1067 // order, although this is not required by the Java memory model.
1068 // In this case, we want to enforce visibility of volatile field
1069 // initializations which are performed in constructors.
1070 // So as with finals, we add a barrier here.
1071 //
1072 // "All bets are off" unless the first publication occurs after a
1073 // normal return from the constructor. We do not attempt to detect
1074 // such unusual early publications. But no barrier is needed on
1075 // exceptional returns, since they cannot publish normally.
1076 //
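// Illustrative example (not from the VM sources): given
//   class T { final int x; T(int v) { x = v; } }
// even a racy publication "shared = new T(42);" must not let another thread
// observe shared.x == 0, which is why a release barrier is emitted below
// before the constructor returns.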
1077 if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
1078 (wrote_final() ||
1079 (AlwaysSafeConstructors && wrote_fields()) ||
1080 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1081 _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1082
1083    // If a memory barrier is created for the final field writes
1084    // and the allocation node does not escape the initializer method,
1085    // then the barrier introduced by the allocation node can be removed.
1086 if (DoEscapeAnalysis && alloc_with_final()) {
1087 AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final());
1088 alloc->compute_MemBar_redundancy(method());
1089 }
1090 if (PrintOpto && (Verbose || WizardMode)) {
1091 method()->print_name();
1092 tty->print_cr(" writes finals and needs a memory barrier");
1093 }
1094 }
1095
1096 // Any method can write a @Stable field; insert memory barriers
1097 // after those also. Can't bind predecessor allocation node (if any)
1098 // with barrier because allocation doesn't always dominate
1099 // MemBarRelease.
1100 if (wrote_stable()) {
1101 _exits.insert_mem_bar(Op_MemBarRelease);
1102 if (PrintOpto && (Verbose || WizardMode)) {
1103 method()->print_name();
1104 tty->print_cr(" writes @Stable and needs a memory barrier");
1105 }
1106 }
1107
1108 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1109 // transform each slice of the original memphi:
1110 mms.set_memory(_gvn.transform(mms.memory()));
1111 }
1112 // Clean up input MergeMems created by transforming the slices
1113 _gvn.transform(_exits.merged_memory());
1114
1115 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1116 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1117 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1118 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1119 // If the type we set for the ret_phi in build_exits() is too optimistic and
1120 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1121 // loading. It could also be due to an error, so mark this method as not compilable because
1122 // otherwise this could lead to an infinite compile loop.
1123 // In any case, this code path is rarely (and never in my testing) reached.
1124 #ifdef ASSERT
1125 tty->print_cr("# Can't determine return type.");
1126 tty->print_cr("# exit control");
1127 _exits.control()->dump(2);
1128 tty->print_cr("# ret phi type");
1129 _gvn.type(ret_phi)->dump();
1130 tty->print_cr("# ret phi");
1131 ret_phi->dump(2);
1132 #endif // ASSERT
1133 assert(false, "Can't determine return type.");
1134 C->record_method_not_compilable("Can't determine return type.");
1135 return;
1136 }
1200
1201 //-----------------------------create_entry_map-------------------------------
1202 // Initialize our parser map to contain the types at method entry.
1203 // For OSR, the map contains a single RawPtr parameter.
1204 // Initial monitor locking for sync. methods is performed by do_method_entry.
1205 SafePointNode* Parse::create_entry_map() {
1206 // Check for really stupid bail-out cases.
1207 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1208 if (len >= 32760) {
1210     // Bailout expected; this is a very rare edge case.
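// (32760 is just below 2^15, presumably chosen to keep map and JVMS offsets
// safely within a signed 16-bit range.)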
1210 C->record_method_not_compilable("too many local variables");
1211 return nullptr;
1212 }
1213
1214 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1215 _caller->map()->delete_replaced_nodes();
1216
1217 // If this is an inlined method, we may have to do a receiver null check.
1218 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1219 GraphKit kit(_caller);
1220 Node* receiver = kit.argument(0);
1221 Node* null_free = kit.null_check_receiver_before_call(method());
1222 _caller = kit.transfer_exceptions_into_jvms();
1223 if (receiver->is_InlineType() && receiver->as_InlineType()->is_larval()) {
1224 // Replace the larval inline type receiver in the exit map as well to make sure that
1225 // we can find and update it in Parse::do_call when we are done with the initialization.
1226 _exits.map()->replace_edge(receiver, null_free);
1227 }
1228 if (kit.stopped()) {
1229 _exits.add_exception_states_from(_caller);
1230 _exits.set_jvms(_caller);
1231 return nullptr;
1232 }
1233 }
1234
1235 assert(method() != nullptr, "parser must have a method");
1236
1237 // Create an initial safepoint to hold JVM state during parsing
1238 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1239 set_map(new SafePointNode(len, jvms));
1240 jvms->set_map(map());
1241 record_for_igvn(map());
1242 assert(jvms->endoff() == len, "correct jvms sizing");
1243
1244 SafePointNode* inmap = _caller->map();
1245 assert(inmap != nullptr, "must have inmap");
1246 // In case of null check on receiver above
1247 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1248
1249 uint i;
1250
1251 // Pass thru the predefined input parameters.
1252 for (i = 0; i < TypeFunc::Parms; i++) {
1253 map()->init_req(i, inmap->in(i));
1254 }
1255
1256 if (depth() == 1) {
1257 assert(map()->memory()->Opcode() == Op_Parm, "");
1258 // Insert the memory aliasing node
1259 set_all_memory(reset_memory());
1260 }
1261 assert(merged_memory(), "");
1262
1263 // Now add the locals which are initially bound to arguments:
1264 uint arg_size = tf()->domain_sig()->cnt();
1265 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1266 for (i = TypeFunc::Parms; i < arg_size; i++) {
1267 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1268 }
1269
1270 // Clear out the rest of the map (locals and stack)
1271 for (i = arg_size; i < len; i++) {
1272 map()->init_req(i, top());
1273 }
1274
1275 SafePointNode* entry_map = stop();
1276 return entry_map;
1277 }
1278
1279 //-----------------------------do_method_entry--------------------------------
1280 // Emit any code needed in the pseudo-block before BCI zero.
1281 // The main thing to do is lock the receiver of a synchronized method.
1282 void Parse::do_method_entry() {
1283 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1284 set_sp(0); // Java Stack Pointer
1318
1319 // If the method is synchronized, we need to construct a lock node, attach
1320 // it to the Start node, and pin it there.
1321 if (method()->is_synchronized()) {
1322 // Insert a FastLockNode right after the Start which takes as arguments
1323 // the current thread pointer, the "this" pointer & the address of the
1324 // stack slot pair used for the lock. The "this" pointer is a projection
1325 // off the start node, but the locking spot has to be constructed by
1326 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1327 // becomes the second argument to the FastLockNode call. The
1328 // FastLockNode becomes the new control parent to pin it to the start.
1329
1330     // Set up the object pointer
1331 Node *lock_obj = nullptr;
1332 if (method()->is_static()) {
1333 ciInstance* mirror = _method->holder()->java_mirror();
1334 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1335 lock_obj = makecon(t_lock);
1336 } else { // Else pass the "this" pointer,
1337 lock_obj = local(0); // which is Parm0 from StartNode
1338 assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1339 }
1340 // Clear out dead values from the debug info.
1341 kill_dead_locals();
1342 // Build the FastLockNode
1343 _synch_lock = shared_lock(lock_obj);
1344 // Check for bailout in shared_lock
1345 if (failing()) { return; }
1346 }
1347
1348   // Feed profiling data for the parameters to the type system so it can
1349   // propagate them as speculative types.
1350 record_profiled_parameters_for_speculation();
1351 }
1352
1353 //------------------------------init_blocks------------------------------------
1354 // Create the parser's basic blocks from the ciTypeFlow information.
1355 void Parse::init_blocks() {
1356 // Create the blocks.
1357 _block_count = flow()->block_count();
1358 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1754 //--------------------handle_missing_successor---------------------------------
1755 void Parse::handle_missing_successor(int target_bci) {
1756 #ifndef PRODUCT
1757 Block* b = block();
1758   int trap_bci = b->flow()->has_trap() ? b->flow()->trap_bci() : -1;
1759 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1760 #endif
1761 ShouldNotReachHere();
1762 }
1763
1764 //--------------------------merge_common---------------------------------------
1765 void Parse::merge_common(Parse::Block* target, int pnum) {
1766 if (TraceOptoParse) {
1767 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1768 }
1769
1770 // Zap extra stack slots to top
1771 assert(sp() == target->start_sp(), "");
1772 clean_stack(sp());
1773
1774 // Check for merge conflicts involving inline types
1775 JVMState* old_jvms = map()->jvms();
1776 int old_bci = bci();
1777 JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1778 tmp_jvms->set_should_reexecute(true);
1779 tmp_jvms->bind_map(map());
1780   // Execution needs to restart at the next bytecode (entry of next
1781   // block)
1782 if (target->is_merged() ||
1783 pnum > PhiNode::Input ||
1784 target->is_handler() ||
1785 target->is_loop_head()) {
1786 set_parse_bci(target->start());
1787 for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1788 Node* n = map()->in(j); // Incoming change to target state.
1789 const Type* t = nullptr;
1790 if (tmp_jvms->is_loc(j)) {
1791 t = target->local_type_at(j - tmp_jvms->locoff());
1792 } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1793 t = target->stack_type_at(j - tmp_jvms->stkoff());
1794 }
1795 if (t != nullptr && t != Type::BOTTOM) {
1796 if (n->is_InlineType() && !t->is_inlinetypeptr()) {
1797 // Allocate inline type in src block to be able to merge it with oop in target block
1798 map()->set_req(j, n->as_InlineType()->buffer(this));
1799 } else if (!n->is_InlineType() && t->is_inlinetypeptr()) {
1800 // Scalarize null in src block to be able to merge it with inline type in target block
1801 assert(gvn().type(n)->is_zero_type(), "Should have been scalarized");
1802 map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
1803 }
1804 }
1805 }
1806 }
1807 old_jvms->bind_map(map());
1808 set_parse_bci(old_bci);
1809
1810 if (!target->is_merged()) { // No prior mapping at this bci
1811 if (TraceOptoParse) { tty->print(" with empty state"); }
1812
1813 // If this path is dead, do not bother capturing it as a merge.
1814 // It is "as if" we had 1 fewer predecessors from the beginning.
1815 if (stopped()) {
1816 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1817 return;
1818 }
1819
1820 // Make a region if we know there are multiple or unpredictable inputs.
1821 // (Also, if this is a plain fall-through, we might see another region,
1822 // which must not be allowed into this block's map.)
1823 if (pnum > PhiNode::Input // Known multiple inputs.
1824 || target->is_handler() // These have unpredictable inputs.
1825 || target->is_loop_head() // Known multiple inputs
1826 || control()->is_Region()) { // We must hide this guy.
1827
1828 int current_bci = bci();
1829 set_parse_bci(target->start()); // Set target bci
1844 record_for_igvn(r);
1845 // zap all inputs to null for debugging (done in Node(uint) constructor)
1846 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1847 r->init_req(pnum, control());
1848 set_control(r);
1849 target->copy_irreducible_status_to(r, jvms());
1850 set_parse_bci(current_bci); // Restore bci
1851 }
1852
1853 // Convert the existing Parser mapping into a mapping at this bci.
1854 store_state_to(target);
1855 assert(target->is_merged(), "do not come here twice");
1856
1857 } else { // Prior mapping at this bci
1858 if (TraceOptoParse) { tty->print(" with previous state"); }
1859 #ifdef ASSERT
1860 if (target->is_SEL_head()) {
1861 target->mark_merged_backedge(block());
1862 }
1863 #endif
1864
1865 // We must not manufacture more phis if the target is already parsed.
1866 bool nophi = target->is_parsed();
1867
1868     SafePointNode* newin = map(); // Hang on to incoming mapping
1869     Block* save_block = block(); // Hang on to incoming block
1870 load_state_from(target); // Get prior mapping
1871
1872 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1873 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1874 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1875 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1876
1877 // Iterate over my current mapping and the old mapping.
1878 // Where different, insert Phi functions.
1879 // Use any existing Phi functions.
1880 assert(control()->is_Region(), "must be merging to a region");
1881 RegionNode* r = control()->as_Region();
1882
1883 // Compute where to merge into
1884 // Merge incoming control path
1885 r->init_req(pnum, newin->control());
1886
1887 if (pnum == 1) { // Last merge for this Region?
1888 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1889 Node* result = _gvn.transform(r);
1890 if (r != result && TraceOptoParse) {
1891 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1892 }
1893 }
1894 record_for_igvn(r);
1895 }
1896
1897 // Update all the non-control inputs to map:
1898 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1899 bool check_elide_phi = target->is_SEL_backedge(save_block);
1900 bool last_merge = (pnum == PhiNode::Input);
1901 for (uint j = 1; j < newin->req(); j++) {
1902 Node* m = map()->in(j); // Current state of target.
1903 Node* n = newin->in(j); // Incoming change to target state.
1904 PhiNode* phi;
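// Find any phi already merging this slot: either a Phi rooted directly at r,
// or the oop phi inside an InlineTypeNode that already carries per-path phi
// inputs for this region.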
1905 if (m->is_Phi() && m->as_Phi()->region() == r) {
1906 phi = m->as_Phi();
1907 } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1908 phi = m->as_InlineType()->get_oop()->as_Phi();
1909 } else {
1910 phi = nullptr;
1911 }
1912 if (m != n) { // Different; must merge
1913 switch (j) {
1914         // Frame pointer and Return Address never change
1915         case TypeFunc::FramePtr: // Drop m, use the original value
1916 case TypeFunc::ReturnAdr:
1917 break;
1918 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1919 assert(phi == nullptr, "the merge contains phis, not vice versa");
1920 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1921 continue;
1922 default: // All normal stuff
1923 if (phi == nullptr) {
1924 const JVMState* jvms = map()->jvms();
1925 if (EliminateNestedLocks &&
1926 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1927             // BoxLock nodes are not commoned by GVN.
1928             // Use the old BoxLock node as the merged box.
1929 assert(newin->jvms()->is_monitor_box(j), "sanity");
1930 // This assert also tests that nodes are BoxLock.
1931 assert(BoxLockNode::same_slot(n, m), "sanity");
1932 C->gvn_replace_by(n, m);
1933 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1934 phi = ensure_phi(j, nophi);
1935 }
1936 }
1937 break;
1938 }
1939 }
1940 // At this point, n might be top if:
1941 // - there is no phi (because TypeFlow detected a conflict), or
1942       // - the corresponding control edge is top (a dead incoming path)
1943 // It is a bug if we create a phi which sees a garbage value on a live path.
1944
1945 // Merging two inline types?
1946 if (phi != nullptr && phi->bottom_type()->is_inlinetypeptr()) {
1947 // Reload current state because it may have been updated by ensure_phi
1948 m = map()->in(j);
1949 InlineTypeNode* vtm = m->as_InlineType(); // Current inline type
1950 InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
1951 assert(vtm->get_oop() == phi, "Inline type should have Phi input");
1952 if (TraceOptoParse) {
1953 #ifdef ASSERT
1954 tty->print_cr("\nMerging inline types");
1955 tty->print_cr("Current:");
1956 vtm->dump(2);
1957 tty->print_cr("Incoming:");
1958 vtn->dump(2);
1959 tty->cr();
1960 #endif
1961 }
1962 // Do the merge
1963 vtm->merge_with(&_gvn, vtn, pnum, last_merge);
1964 if (last_merge) {
1965 map()->set_req(j, _gvn.transform(vtm));
1966 record_for_igvn(vtm);
1967 }
1968 } else if (phi != nullptr) {
1969 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1970 assert(phi->region() == r, "");
1971 phi->set_req(pnum, n); // Then add 'n' to the merge
1972 if (last_merge) {
1973 // Last merge for this Phi.
1974 // So far, Phis have had a reasonable type from ciTypeFlow.
1975 // Now _gvn will join that with the meet of current inputs.
1976           // BOTTOM is never permissible here, because pessimistically
1977           // Phis of pointers cannot lose the basic pointer type.
1978 debug_only(const Type* bt1 = phi->bottom_type());
1979 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1980 map()->set_req(j, _gvn.transform(phi));
1981 debug_only(const Type* bt2 = phi->bottom_type());
1982 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1983 record_for_igvn(phi);
1984 }
1985 }
1986 } // End of for all values to be merged
1987
1988 if (last_merge && !r->in(0)) { // The occasional useless Region
1989 assert(control() == r, "");
1990 set_control(r->nonnull_req());
1991 }
1992
1993 map()->merge_replaced_nodes_with(newin);
1994
1995 // newin has been subsumed into the lazy merge, and is now dead.
1996 set_block(save_block);
1997
1998 stop(); // done with this guy, for now
1999 }
2000
2001 if (TraceOptoParse) {
2002 tty->print_cr(" on path %d", pnum);
2003 }
2004
2005 // Done with this parser state.
2006 assert(stopped(), "");
2007 }
2008
2120
2121 // Add new path to the region.
2122 uint pnum = r->req();
2123 r->add_req(nullptr);
2124
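// Every phi hanging off this region must grow in sync with it; the new
// inputs are left null here and are presumably filled in by the caller.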
2125 for (uint i = 1; i < map->req(); i++) {
2126 Node* n = map->in(i);
2127 if (i == TypeFunc::Memory) {
2128 // Ensure a phi on all currently known memories.
2129 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2130 Node* phi = mms.memory();
2131 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2132 assert(phi->req() == pnum, "must be same size as region");
2133 phi->add_req(nullptr);
2134 }
2135 }
2136 } else {
2137 if (n->is_Phi() && n->as_Phi()->region() == r) {
2138 assert(n->req() == pnum, "must be same size as region");
2139 n->add_req(nullptr);
2140 } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2141 n->as_InlineType()->add_new_path(r);
2142 }
2143 }
2144 }
2145
2146 return pnum;
2147 }
2148
2149 //------------------------------ensure_phi-------------------------------------
2150 // Turn the idx'th entry of the current map into a Phi
2151 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2152 SafePointNode* map = this->map();
2153 Node* region = map->control();
2154 assert(region->is_Region(), "");
2155
2156 Node* o = map->in(idx);
2157 assert(o != nullptr, "");
2158
2159 if (o == top()) return nullptr; // TOP always merges into TOP
2160
2161 if (o->is_Phi() && o->as_Phi()->region() == region) {
2162 return o->as_Phi();
2163 }
2164 InlineTypeNode* vt = o->isa_InlineType();
2165 if (vt != nullptr && vt->has_phi_inputs(region)) {
2166 return vt->get_oop()->as_Phi();
2167 }
2168
2169 // Now use a Phi here for merging
2170 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2171 const JVMState* jvms = map->jvms();
2172 const Type* t = nullptr;
2173 if (jvms->is_loc(idx)) {
2174 t = block()->local_type_at(idx - jvms->locoff());
2175 } else if (jvms->is_stk(idx)) {
2176 t = block()->stack_type_at(idx - jvms->stkoff());
2177 } else if (jvms->is_mon(idx)) {
2178 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2179 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2180 } else if ((uint)idx < TypeFunc::Parms) {
2181 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2182 } else {
2183 assert(false, "no type information for this phi");
2184 }
2185
2186 // If the type falls to bottom, then this must be a local that
2187 // is already dead or is mixing ints and oops or some such.
2188 // Forcing it to top makes it go dead.
2189 if (t == Type::BOTTOM) {
2190 map->set_req(idx, top());
2191 return nullptr;
2192 }
2193
2194 // Do not create phis for top either.
2195   // A top value on a live control path must remain unused, even after the phi.
2196 if (t == Type::TOP || t == Type::HALF) {
2197 map->set_req(idx, top());
2198 return nullptr;
2199 }
2200
2201 if (vt != nullptr && t->is_inlinetypeptr()) {
2202 // Inline types are merged by merging their field values.
2203 // Create a cloned InlineTypeNode with phi inputs that
2204 // represents the merged inline type and update the map.
2205 // TODO 8325106 Why can't we pass map here?
2206 vt = vt->clone_with_phis(&_gvn, region);
2207 map->set_req(idx, vt);
2208 return vt->get_oop()->as_Phi();
2209 } else {
2210 PhiNode* phi = PhiNode::make(region, o, t);
2211 gvn().set_type(phi, t);
2212 if (C->do_escape_analysis()) record_for_igvn(phi);
2213 map->set_req(idx, phi);
2214 return phi;
2215 }
2216 }
2217
2218 //--------------------------ensure_memory_phi----------------------------------
2219 // Turn the idx'th slice of the current memory into a Phi
2220 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2221 MergeMemNode* mem = merged_memory();
2222 Node* region = control();
2223 assert(region->is_Region(), "");
2224
2225   Node *o = (idx == Compile::AliasIdxBot) ? mem->base_memory() : mem->memory_at(idx);
2226 assert(o != nullptr && o != top(), "");
2227
2228 PhiNode* phi;
2229 if (o->is_Phi() && o->as_Phi()->region() == region) {
2230 phi = o->as_Phi();
2231 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2232 // clone the shared base memory phi to make a new memory split
2233 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2234 const Type* t = phi->bottom_type();
2235 const TypePtr* adr_type = C->get_adr_type(idx);
2363 Node* chk = _gvn.transform( new CmpINode(opq, profile_state) );
2364 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
2365 // Branch to failure if state was changed
2366 { BuildCutout unless(this, tst, PROB_ALWAYS);
2367 uncommon_trap(Deoptimization::Reason_rtm_state_change,
2368 Deoptimization::Action_make_not_entrant);
2369 }
2370 }
2371 #endif
2372 }
2373
2374 //------------------------------return_current---------------------------------
2375 // Append current _map to _exit_return
2376 void Parse::return_current(Node* value) {
2377 if (RegisterFinalizersAtInit &&
2378 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2379 call_register_finalizer();
2380 }
2381
2382   // Do not set_parse_bci, so that the return code is credited to the return instruction.
2383 // vreturn can trigger an allocation so vreturn can throw. Setting
2384 // the bci here breaks exception handling. Commenting this out
2385 // doesn't seem to break anything.
2386 // set_bci(InvocationEntryBci);
2387 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2388 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2389 }
2390 if (C->env()->dtrace_method_probes()) {
2391 make_dtrace_method_exit(method());
2392 }
2393 // frame pointer is always same, already captured
2394 if (value != nullptr) {
2395 Node* phi = _exits.argument(0);
2396 const Type* return_type = phi->bottom_type();
2397 const TypeInstPtr* tr = return_type->isa_instptr();
2398 assert(!value->is_InlineType() || !value->as_InlineType()->is_larval(), "returning a larval");
2399 if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2400 return_type->is_inlinetypeptr()) {
2401 // Inline type is returned as fields, make sure it is scalarized
2402 if (!value->is_InlineType()) {
2403 value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass(), false);
2404 }
2405 if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2406 // Returning from root or an incrementally inlined method. Make sure all non-flat
2407 // fields are buffered and re-execute if allocation triggers deoptimization.
2408 PreserveReexecuteState preexecs(this);
2409 assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2410 jvms()->set_should_reexecute(true);
2411 inc_sp(1);
2412 value = value->as_InlineType()->allocate_fields(this);
2413 }
2414 } else if (value->is_InlineType()) {
2415 // Inline type is returned as oop, make sure it is buffered and re-execute
2416 // if allocation triggers deoptimization.
2417 PreserveReexecuteState preexecs(this);
2418 jvms()->set_should_reexecute(true);
2419 inc_sp(1);
2420 value = value->as_InlineType()->buffer(this);
2421 }
2422 // ...else
2423 // If returning oops to an interface-return, there is a silent free
2424 // cast from oop to interface allowed by the Verifier. Make it explicit here.
2425 phi->add_req(value);
2426 }
2427
2428 SafePointNode* exit_return = _exits.map();
2429 exit_return->in( TypeFunc::Control )->add_req( control() );
2430 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2431 Node *mem = exit_return->in( TypeFunc::Memory );
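// Walk the exit map's MergeMem and this path's memory state in lock step,
// lazily splitting the exit's base-memory phi whenever this return path
// touches a slice the exit map has not yet individualized.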
2432 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2433 if (mms.is_empty()) {
2434 // get a copy of the base memory, and patch just this one input
2435 const TypePtr* adr_type = mms.adr_type(C);
2436 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2437 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2438 gvn().set_type_bottom(phi);
2439 phi->del_req(phi->req()-1); // prepare to re-patch
2440 mms.set_memory(phi);
2441 }
2442 mms.memory()->add_req(mms.memory2());
2443 }
2444
2445 if (_first_return) {
2446 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2447 _first_return = false;
2448 } else {
2449 _exits.map()->merge_replaced_nodes_with(map());
2450 }
2451
2452 stop_and_kill_map(); // This CFG path dies here
2453 }
2454
2455
2456 //------------------------------add_safepoint----------------------------------
2457 void Parse::add_safepoint() {
2458 uint parms = TypeFunc::Parms+1;
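// (The extra input slot beyond TypeFunc::Parms presumably holds the
// polling-page address that the rest of this method attaches to the
// SafePointNode.)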
2459
2460 // Clear out dead values from the debug info.
2461 kill_dead_locals();
2462
2463 // Clone the JVM State
2464 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);