13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "interpreter/linkResolver.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/method.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/c2compiler.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/idealGraphPrinter.hpp"
34 #include "opto/locknode.hpp"
35 #include "opto/memnode.hpp"
36 #include "opto/opaquenode.hpp"
37 #include "opto/parse.hpp"
38 #include "opto/rootnode.hpp"
39 #include "opto/runtime.hpp"
40 #include "opto/type.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "utilities/bitMap.inline.hpp"
45 #include "utilities/copy.hpp"
46
47 // Static array so we can figure out which bytecodes most often stop us from
48 // compiling. Some of the non-static variables are needed in bytecodeInfo.cpp
49 // and eventually should be encapsulated in a proper class (gri 8/18/98).
50
51 #ifndef PRODUCT
52 uint nodes_created = 0;
53 uint methods_parsed = 0;
85 }
86 if (all_null_checks_found) {
87 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
88 (100*implicit_null_checks)/all_null_checks_found);
89 }
90 if (SharedRuntime::_implicit_null_throws) {
91 tty->print_cr("%u implicit null exceptions at runtime",
92 SharedRuntime::_implicit_null_throws);
93 }
94
95 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
96 BytecodeParseHistogram::print();
97 }
98 }
99 #endif
100
101 //------------------------------ON STACK REPLACEMENT---------------------------
102
103 // Construct a node which can be used to get incoming state for
104 // on stack replacement.
105 Node *Parse::fetch_interpreter_state(int index,
106 BasicType bt,
107 Node *local_addrs,
108 Node *local_addrs_base) {
109 Node *mem = memory(Compile::AliasIdxRaw);
110 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
111 Node *ctl = control();
112
113 // Very similar to LoadNode::make, except we handle unaligned longs and
114 // doubles on SPARC. Intel can handle them just fine directly.
115 Node *l = nullptr;
116 switch (bt) { // Signature is flattened
117 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
118 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
119 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
120 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
121 case T_LONG:
122 case T_DOUBLE: {
123 // Since arguments are in reverse order, the argument address 'adr'
124 // refers to the back half of the long/double. Recompute adr.
125 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
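// Layout illustration (an assumption drawn from the arithmetic above, not
// normative): slot #k of the buffer lives at
//   local_addrs_base + local_addrs - k*wordSize,
// so a two-word long/double occupying slots #index and #index+1 starts at
// the lower, recomputed address.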
126 if (Matcher::misaligned_doubles_ok) {
127 l = (bt == T_DOUBLE)
128 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
129 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
130 } else {
131 l = (bt == T_DOUBLE)
132 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
133 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
134 }
135 break;
136 }
137 default: ShouldNotReachHere();
138 }
139 return _gvn.transform(l);
140 }
141
142 // Helper routine to prevent the interpreter from handing
143 // unexpected typestate to an OSR method.
144 // The Node l is a value newly dug out of the interpreter frame.
145 // The type is the type predicted by ciTypeFlow. Note that it is
146 // not a general type, but can only come from Type::get_typeflow_type.
147 // The safepoint is a map which will feed an uncommon trap.
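// Sketch of the check emitted for the null/unloaded case below:
//
//   CmpP(l, null()) -> Bool(eq) -> If
//     If.true  : fall through, with l replaced by null()
//     If.false : appended as a new required input of bad_type_exit->control(),
//                which ultimately feeds an uncommon trap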
148 Node* Parse::check_interpreter_type(Node* l, const Type* type,
149 SafePointNode* &bad_type_exit) {
150
151 const TypeOopPtr* tp = type->isa_oopptr();
152
153 // TypeFlow may assert null-ness if a type appears unloaded.
154 if (type == TypePtr::NULL_PTR ||
155 (tp != nullptr && !tp->is_loaded())) {
156 // Value must be null, not a real oop.
157 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
158 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
159 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
160 set_control(_gvn.transform( new IfTrueNode(iff) ));
161 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
162 bad_type_exit->control()->add_req(bad_type);
163 l = null();
164 }
165
166 // Typeflow can also cut off paths from the CFG, based on
167 // types which appear unloaded, or call sites which appear unlinked.
168 // When paths are cut off, values at later merge points can rise
169 // toward more specific classes. Make sure these specific classes
170 // are still in effect.
171 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
172 // TypeFlow asserted a specific object type. Value must have that type.
173 Node* bad_type_ctrl = nullptr;
174 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
175 bad_type_exit->control()->add_req(bad_type_ctrl);
176 }
177
178 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
179 return l;
180 }
181
182 // Helper routine which sets up elements of the initial parser map when
183 // performing a parse for on stack replacement. Add values into map.
184 // The only parameter contains the address of the interpreter arguments.
185 void Parse::load_interpreter_state(Node* osr_buf) {
186 int index;
187 int max_locals = jvms()->loc_size();
188 int max_stack = jvms()->stk_size();
189
190
191 // Mismatch between method and jvms can occur since map briefly held
192 // an OSR entry state (which takes up one RawPtr word).
193 assert(max_locals == method()->max_locals(), "sanity");
194 assert(max_stack >= method()->max_stack(), "sanity");
195 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
196 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
197
198 // Find the start block.
199 Block* osr_block = start_block();
200 assert(osr_block->start() == osr_bci(), "sanity");
201
202 // Set initial BCI.
203 set_parse_bci(osr_block->start());
204
205 // Set initial stack depth.
206 set_sp(osr_block->start_sp());
207
208 // Check bailouts. We currently do not perform on stack replacement
209 // of loops in catch blocks or loops which branch with a non-empty stack.
210 if (sp() != 0) {
225 for (index = 0; index < mcnt; index++) {
226 // Make a BoxLockNode for the monitor.
227 BoxLockNode* osr_box = new BoxLockNode(next_monitor());
228 // Check for bailout after new BoxLockNode
229 if (failing()) { return; }
230
231 // This OSR locking region is unbalanced because it does not have a Lock node:
232 // locking was done in the interpreter.
233 // This is similar to the Coarsened case, when a Lock node is eliminated
234 // and as a result the region is marked as Unbalanced.
235
236 // Emulate Coarsened state transition from Regular to Unbalanced.
237 osr_box->set_coarsened();
238 osr_box->set_unbalanced();
239
240 Node* box = _gvn.transform(osr_box);
241
242 // Displaced headers and locked objects are interleaved in the
243 // temp OSR buffer. We only copy the locked objects out here.
244 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
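// Buffer layout implied by the fetch indices below (an illustration):
//   fetch index 2*index     -> locked object #index    (T_OBJECT)
//   fetch index 2*index + 1 -> displaced header #index (T_ADDRESS)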
245 Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
246 // Try to copy the displaced header to the BoxNode
247 Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
248
249
250 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
251
252 // Build a bogus FastLockNode (no code will be generated) and push the
253 // monitor into our debug info.
254 const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
255 map()->push_monitor(flock);
256
257 // If the lock is our method synchronization lock, tuck it away in
258 // _sync_lock for return and rethrow exit paths.
259 if (index == 0 && method()->is_synchronized()) {
260 _synch_lock = flock;
261 }
262 }
263
264 // Use the raw liveness computation to make sure that unexpected
265 // values don't propagate into the OSR frame.
266 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
267 if (!live_locals.is_valid()) {
268 // Degenerate or breakpointed method.
296 if (C->log() != nullptr) {
297 C->log()->elem("OSR_mismatch local_index='%d'",index);
298 }
299 set_local(index, null());
300 // and ignore it for the loads
301 continue;
302 }
303 }
304
305 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
306 if (type == Type::TOP || type == Type::HALF) {
307 continue;
308 }
309 // If the type falls to bottom, then this must be a local that
310 // is mixing ints and oops or some such. Forcing it to top
311 // makes it go dead.
312 if (type == Type::BOTTOM) {
313 continue;
314 }
315 // Construct code to access the appropriate local.
316 BasicType bt = type->basic_type();
317 if (type == TypePtr::NULL_PTR) {
318 // Ptr types are mixed together with T_ADDRESS, but null is
319 // really for T_OBJECT types, so correct it.
320 bt = T_OBJECT;
321 }
322 Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
323 set_local(index, value);
324 }
325
326 // Extract the needed stack entries from the interpreter frame.
327 for (index = 0; index < sp(); index++) {
328 const Type *type = osr_block->stack_type_at(index);
329 if (type != Type::TOP) {
330 // Currently the compiler bails out when attempting to on stack replace
331 // at a bci with a non-empty stack. We should not reach here.
332 ShouldNotReachHere();
333 }
334 }
335
336 // End the OSR migration
337 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
338 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
339 "OSR_migration_end", TypeRawPtr::BOTTOM,
340 osr_buf);
341
342 // Now that the interpreter state is loaded, make sure it will match
502 // either breakpoint setting or hotswapping of methods may
503 // cause deoptimization.
504 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
505 C->dependencies()->assert_evol_method(method());
506 }
507
508 NOT_PRODUCT(methods_seen++);
509
510 // Do some special top-level things.
511 if (depth() == 1 && C->is_osr_compilation()) {
512 _tf = C->tf(); // the OSR entry type is different
513 _entry_bci = C->entry_bci();
514 _flow = method()->get_osr_flow_analysis(osr_bci());
515 } else {
516 _tf = TypeFunc::make(method());
517 _entry_bci = InvocationEntryBci;
518 _flow = method()->get_flow_analysis();
519 }
520
521 if (_flow->failing()) {
522 assert(false, "type flow analysis failed during parsing");
523 C->record_method_not_compilable(_flow->failure_reason());
524 #ifndef PRODUCT
525 if (PrintOpto && (Verbose || WizardMode)) {
526 if (is_osr_parse()) {
527 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
528 } else {
529 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
530 }
531 if (Verbose) {
532 method()->print();
533 method()->print_codes();
534 _flow->print();
535 }
536 }
537 #endif
538 }
539
540 #ifdef ASSERT
541 if (depth() == 1) {
542 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
593 load_interpreter_state(osr_buf);
594 } else {
595 set_map(entry_map);
596 do_method_entry();
597 }
598
599 if (depth() == 1 && !failing()) {
600 if (C->clinit_barrier_on_entry()) {
601 // Add check to deoptimize the nmethod once the holder class is fully initialized
602 clinit_deopt();
603 }
604 }
605
606 // Check for bailouts during method entry.
607 if (failing()) {
608 if (log) log->done("parse");
609 C->set_default_node_notes(caller_nn);
610 return;
611 }
612
613 entry_map = map(); // capture any changes performed by method setup code
614 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
615
616 // We begin parsing as if we have just encountered a jump to the
617 // method entry.
618 Block* entry_block = start_block();
619 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
620 set_map_clone(entry_map);
621 merge_common(entry_block, entry_block->next_path_num());
622
623 #ifndef PRODUCT
624 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
625 set_parse_histogram( parse_histogram_obj );
626 #endif
627
628 // Parse all the basic blocks.
629 do_all_blocks();
630
631 // Check for bailouts during conversion to graph
632 if (failing()) {
778 void Parse::build_exits() {
779 // make a clone of caller to prevent sharing of side-effects
780 _exits.set_map(_exits.clone_map());
781 _exits.clean_stack(_exits.sp());
782 _exits.sync_jvms();
783
784 RegionNode* region = new RegionNode(1);
785 record_for_igvn(region);
786 gvn().set_type_bottom(region);
787 _exits.set_control(region);
788
789 // Note: iophi and memphi are not transformed until do_exits.
790 Node* iophi = new PhiNode(region, Type::ABIO);
791 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
792 gvn().set_type_bottom(iophi);
793 gvn().set_type_bottom(memphi);
794 _exits.set_i_o(iophi);
795 _exits.set_all_memory(memphi);
796
797 // Add a return value to the exit state. (Do not push it yet.)
798 if (tf()->range()->cnt() > TypeFunc::Parms) {
799 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
800 if (ret_type->isa_int()) {
801 BasicType ret_bt = method()->return_type()->basic_type();
802 if (ret_bt == T_BOOLEAN ||
803 ret_bt == T_CHAR ||
804 ret_bt == T_BYTE ||
805 ret_bt == T_SHORT) {
806 ret_type = TypeInt::INT;
807 }
808 }
809
810 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
811 // becomes loaded during the subsequent parsing, the loaded and unloaded
812 // types will not join when we transform and push in do_exits().
813 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
814 if (ret_oop_type && !ret_oop_type->is_loaded()) {
815 ret_type = TypeOopPtr::BOTTOM;
816 }
817 int ret_size = type2size[ret_type->basic_type()];
818 Node* ret_phi = new PhiNode(region, ret_type);
819 gvn().set_type_bottom(ret_phi);
820 _exits.ensure_stack(ret_size);
821 assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
822 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
823 _exits.set_argument(0, ret_phi); // here is where the parser finds it
824 // Note: ret_phi is not yet pushed, until do_exits.
825 }
826 }
827
828
829 //----------------------------build_start_state-------------------------------
830 // Construct a state which contains only the incoming arguments from an
831 // unknown caller. The method & bci will be null & InvocationEntryBci.
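// Resulting map shape (a sketch of the code below): map->req() == max_size,
//   inputs [0, TypeFunc::Parms)  : standard control/io/memory/frame inputs
//   inputs [Parms, arg_size)     : one ParmNode per incoming argument
//   inputs [arg_size, max_size)  : padded with top()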
832 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
833 int arg_size = tf->domain()->cnt();
834 int max_size = MAX2(arg_size, (int)tf->range()->cnt());
835 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
836 SafePointNode* map = new SafePointNode(max_size, jvms);
837 record_for_igvn(map);
838 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
839 Node_Notes* old_nn = default_node_notes();
840 if (old_nn != nullptr && has_method()) {
841 Node_Notes* entry_nn = old_nn->clone(this);
842 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
843 entry_jvms->set_offsets(0);
844 entry_jvms->set_bci(entry_bci());
845 entry_nn->set_jvms(entry_jvms);
846 set_default_node_notes(entry_nn);
847 }
848 uint i;
849 for (i = 0; i < (uint)arg_size; i++) {
850 Node* parm = initial_gvn()->transform(new ParmNode(start, i));
851 map->init_req(i, parm);
852 // Record all these guys for later GVN.
853 record_for_igvn(parm);
854 }
855 for (; i < map->req(); i++) {
856 map->init_req(i, top());
857 }
858 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
859 set_default_node_notes(old_nn);
860 jvms->set_map(map);
861 return jvms;
862 }
863
864 //-----------------------------make_node_notes---------------------------------
865 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
866 if (caller_nn == nullptr) return nullptr;
867 Node_Notes* nn = caller_nn->clone(C);
868 JVMState* caller_jvms = nn->jvms();
869 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
870 jvms->set_offsets(0);
871 jvms->set_bci(_entry_bci);
872 nn->set_jvms(jvms);
873 return nn;
874 }
875
876
877 //--------------------------return_values--------------------------------------
878 void Compile::return_values(JVMState* jvms) {
879 GraphKit kit(jvms);
880 Node* ret = new ReturnNode(TypeFunc::Parms,
881 kit.control(),
882 kit.i_o(),
883 kit.reset_memory(),
884 kit.frameptr(),
885 kit.returnadr());
886 // Add zero or one return value
887 int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
888 if (ret_size > 0) {
889 kit.inc_sp(-ret_size); // pop the return value(s)
890 kit.sync_jvms();
891 ret->add_req(kit.argument(0));
892 // Note: The second dummy edge is not needed by a ReturnNode.
893 }
894 // bind it to root
895 root()->add_req(ret);
896 record_for_igvn(ret);
897 initial_gvn()->transform(ret);
898 }
899
900 //------------------------rethrow_exceptions-----------------------------------
901 // Bind all exception states in the list into a single RethrowNode.
902 void Compile::rethrow_exceptions(JVMState* jvms) {
903 GraphKit kit(jvms);
904 if (!kit.has_exceptions()) return; // nothing to generate
905 // Load my combined exception state into the kit, with all phis transformed:
906 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
907 Node* ex_oop = kit.use_exception_state(ex_map);
908 RethrowNode* exit = new RethrowNode(kit.control(),
909 kit.i_o(), kit.reset_memory(),
910 kit.frameptr(), kit.returnadr(),
911 // like a return but with exception input
912 ex_oop);
996 // to complete, we force all writes to complete.
997 //
998 // 2. An experimental VM option is used to force the barrier if any field
999 //    was written in the constructor.
1000 //
1001 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1002 // support_IRIW_for_not_multiple_copy_atomic_cpu places the MemBarVolatile
1003 // before a volatile load instead of after a volatile store, so there is
1004 // no barrier after the store.
1005 // We want to guarantee the same behavior as on platforms with total store
1006 // order, although this is not required by the Java memory model.
1007 // In this case, we want to enforce visibility of volatile field
1008 // initializations which are performed in constructors.
1009 // So as with finals, we add a barrier here.
1010 //
1011 // "All bets are off" unless the first publication occurs after a
1012 // normal return from the constructor. We do not attempt to detect
1013 // such unusual early publications. But no barrier is needed on
1014 // exceptional returns, since they cannot publish normally.
1015 //
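// Java-level illustration (a hypothetical example, not taken from this code):
//
//   class Point { final int x; Point(int v) { x = v; } }
//   static Point P;
//   ...
//   P = new Point(42);  // publication after normal constructor return
//
// The barrier emitted below sits on the constructor's normal exit, so a thread
// that observes the published reference P also observes x == 42. A publication
// from inside the constructor body is not covered, as noted above.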
1016 if (method()->is_object_initializer() &&
1017 (wrote_final() || wrote_stable() ||
1018 (AlwaysSafeConstructors && wrote_fields()) ||
1019 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1020 Node* recorded_alloc = alloc_with_final_or_stable();
1021 _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1022 recorded_alloc);
1023
1024 // If a memory barrier is created for final field writes
1025 // and the allocation node does not escape the initializer method,
1026 // then the barrier introduced by the allocation node can be removed.
1027 if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1028 AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1029 alloc->compute_MemBar_redundancy(method());
1030 }
1031 if (PrintOpto && (Verbose || WizardMode)) {
1032 method()->print_name();
1033 tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1034 }
1035 }
1036
1037 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1038 // transform each slice of the original memphi:
1039 mms.set_memory(_gvn.transform(mms.memory()));
1040 }
1041 // Clean up input MergeMems created by transforming the slices
1042 _gvn.transform(_exits.merged_memory());
1043
1044 if (tf()->range()->cnt() > TypeFunc::Parms) {
1045 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1046 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1047 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1048 // If the type we set for the ret_phi in build_exits() is too optimistic and
1049 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1050 // loading. It could also be due to an error, so mark this method as not compilable because
1051 // otherwise this could lead to an infinite compile loop.
1052 // In any case, this code path is rarely (and never in my testing) reached.
1053 C->record_method_not_compilable("Can't determine return type.");
1054 return;
1055 }
1056 if (ret_type->isa_int()) {
1057 BasicType ret_bt = method()->return_type()->basic_type();
1058 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1059 }
1060 _exits.push_node(ret_type->basic_type(), ret_phi);
1061 }
1062
1063 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1064
1065 // Unlock along the exceptional paths.
1119
1120 //-----------------------------create_entry_map-------------------------------
1121 // Initialize our parser map to contain the types at method entry.
1122 // For OSR, the map contains a single RawPtr parameter.
1123 // Initial monitor locking for sync. methods is performed by do_method_entry.
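// Sizing sketch: the map gets TypeFunc::Parms + max_locals + max_stack inputs;
// argument slots are copied from the caller's map below, and every remaining
// local/stack slot starts out as top().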
1124 SafePointNode* Parse::create_entry_map() {
1125 // Check for really stupid bail-out cases.
1126 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1127 if (len >= 32760) {
1128 // Bailout expected, this is a very rare edge case.
1129 C->record_method_not_compilable("too many local variables");
1130 return nullptr;
1131 }
1132
1133 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1134 _caller->map()->delete_replaced_nodes();
1135
1136 // If this is an inlined method, we may have to do a receiver null check.
1137 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1138 GraphKit kit(_caller);
1139 kit.null_check_receiver_before_call(method());
1140 _caller = kit.transfer_exceptions_into_jvms();
1141 if (kit.stopped()) {
1142 _exits.add_exception_states_from(_caller);
1143 _exits.set_jvms(_caller);
1144 return nullptr;
1145 }
1146 }
1147
1148 assert(method() != nullptr, "parser must have a method");
1149
1150 // Create an initial safepoint to hold JVM state during parsing
1151 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1152 set_map(new SafePointNode(len, jvms));
1153 jvms->set_map(map());
1154 record_for_igvn(map());
1155 assert(jvms->endoff() == len, "correct jvms sizing");
1156
1157 SafePointNode* inmap = _caller->map();
1158 assert(inmap != nullptr, "must have inmap");
1159 // In case of null check on receiver above
1160 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1161
1162 uint i;
1163
1164 // Pass thru the predefined input parameters.
1165 for (i = 0; i < TypeFunc::Parms; i++) {
1166 map()->init_req(i, inmap->in(i));
1167 }
1168
1169 if (depth() == 1) {
1170 assert(map()->memory()->Opcode() == Op_Parm, "");
1171 // Insert the memory aliasing node
1172 set_all_memory(reset_memory());
1173 }
1174 assert(merged_memory(), "");
1175
1176 // Now add the locals which are initially bound to arguments:
1177 uint arg_size = tf()->domain()->cnt();
1178 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1179 for (i = TypeFunc::Parms; i < arg_size; i++) {
1180 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1181 }
1182
1183 // Clear out the rest of the map (locals and stack)
1184 for (i = arg_size; i < len; i++) {
1185 map()->init_req(i, top());
1186 }
1187
1188 SafePointNode* entry_map = stop();
1189 return entry_map;
1190 }
1191
1192 //-----------------------------do_method_entry--------------------------------
1193 // Emit any code needed in the pseudo-block before BCI zero.
1194 // The main thing to do is lock the receiver of a synchronized method.
1195 void Parse::do_method_entry() {
1196 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1197 set_sp(0); // Java Stack Pointer
1231
1232 // If the method is synchronized, we need to construct a lock node, attach
1233 // it to the Start node, and pin it there.
1234 if (method()->is_synchronized()) {
1235 // Insert a FastLockNode right after the Start which takes as arguments
1236 // the current thread pointer, the "this" pointer & the address of the
1237 // stack slot pair used for the lock. The "this" pointer is a projection
1238 // off the start node, but the locking spot has to be constructed by
1239 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1240 // becomes the second argument to the FastLockNode call. The
1241 // FastLockNode becomes the new control parent to pin it to the start.
1242
1243 // Setup Object Pointer
1244 Node *lock_obj = nullptr;
1245 if (method()->is_static()) {
1246 ciInstance* mirror = _method->holder()->java_mirror();
1247 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1248 lock_obj = makecon(t_lock);
1249 } else { // Else pass the "this" pointer,
1250 lock_obj = local(0); // which is Parm0 from StartNode
1251 }
1252 // Clear out dead values from the debug info.
1253 kill_dead_locals();
1254 // Build the FastLockNode
1255 _synch_lock = shared_lock(lock_obj);
1256 // Check for bailout in shared_lock
1257 if (failing()) { return; }
1258 }
1259
1260 // Feed profiling data for parameters to the type system so it can
1261 // propagate it as speculative types
1262 record_profiled_parameters_for_speculation();
1263 }
1264
1265 //------------------------------init_blocks------------------------------------
1266 // Initialize our parser map to contain the types/monitors at method entry.
1267 void Parse::init_blocks() {
1268 // Create the blocks.
1269 _block_count = flow()->block_count();
1270 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1666 //--------------------handle_missing_successor---------------------------------
1667 void Parse::handle_missing_successor(int target_bci) {
1668 #ifndef PRODUCT
1669 Block* b = block();
1670 int trap_bci = b->flow()->has_trap() ? b->flow()->trap_bci() : -1;
1671 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1672 #endif
1673 ShouldNotReachHere();
1674 }
1675
1676 //--------------------------merge_common---------------------------------------
1677 void Parse::merge_common(Parse::Block* target, int pnum) {
1678 if (TraceOptoParse) {
1679 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1680 }
1681
1682 // Zap extra stack slots to top
1683 assert(sp() == target->start_sp(), "");
1684 clean_stack(sp());
1685
1686 if (!target->is_merged()) { // No prior mapping at this bci
1687 if (TraceOptoParse) { tty->print(" with empty state"); }
1688
1689 // If this path is dead, do not bother capturing it as a merge.
1690 // It is "as if" we had 1 fewer predecessors from the beginning.
1691 if (stopped()) {
1692 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1693 return;
1694 }
1695
1696 // Make a region if we know there are multiple or unpredictable inputs.
1697 // (Also, if this is a plain fall-through, we might see another region,
1698 // which must not be allowed into this block's map.)
1699 if (pnum > PhiNode::Input // Known multiple inputs.
1700 || target->is_handler() // These have unpredictable inputs.
1701 || target->is_loop_head() // Known multiple inputs
1702 || control()->is_Region()) { // We must hide this guy.
1703
1704 int current_bci = bci();
1705 set_parse_bci(target->start()); // Set target bci
1720 record_for_igvn(r);
1721 // zap all inputs to null for debugging (done in Node(uint) constructor)
1722 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1723 r->init_req(pnum, control());
1724 set_control(r);
1725 target->copy_irreducible_status_to(r, jvms());
1726 set_parse_bci(current_bci); // Restore bci
1727 }
1728
1729 // Convert the existing Parser mapping into a mapping at this bci.
1730 store_state_to(target);
1731 assert(target->is_merged(), "do not come here twice");
1732
1733 } else { // Prior mapping at this bci
1734 if (TraceOptoParse) { tty->print(" with previous state"); }
1735 #ifdef ASSERT
1736 if (target->is_SEL_head()) {
1737 target->mark_merged_backedge(block());
1738 }
1739 #endif
1740 // We must not manufacture more phis if the target is already parsed.
1741 bool nophi = target->is_parsed();
1742
1743 SafePointNode* newin = map();// Hang on to incoming mapping
1744 Block* save_block = block(); // Hang on to incoming block;
1745 load_state_from(target); // Get prior mapping
1746
1747 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1748 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1749 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1750 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1751
1752 // Iterate over my current mapping and the old mapping.
1753 // Where different, insert Phi functions.
1754 // Use any existing Phi functions.
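// Merge sketch for one map slot j (illustration of the loop below):
//   before: m = target's current value, n = incoming value, m != n
//   after : map[j] = Phi on region r with n wired into input pnum,
//           reusing an existing Phi on r when slot j already holds one.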
1755 assert(control()->is_Region(), "must be merging to a region");
1756 RegionNode* r = control()->as_Region();
1757
1758 // Compute where to merge into
1759 // Merge incoming control path
1760 r->init_req(pnum, newin->control());
1761
1762 if (pnum == 1) { // Last merge for this Region?
1763 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1764 Node* result = _gvn.transform(r);
1765 if (r != result && TraceOptoParse) {
1766 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1767 }
1768 }
1769 record_for_igvn(r);
1770 }
1771
1772 // Update all the non-control inputs to map:
1773 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1774 bool check_elide_phi = target->is_SEL_backedge(save_block);
1775 for (uint j = 1; j < newin->req(); j++) {
1776 Node* m = map()->in(j); // Current state of target.
1777 Node* n = newin->in(j); // Incoming change to target state.
1778 PhiNode* phi;
1779 if (m->is_Phi() && m->as_Phi()->region() == r)
1780 phi = m->as_Phi();
1781 else
1782 phi = nullptr;
1783 if (m != n) { // Different; must merge
1784 switch (j) {
1785 // Frame pointer and Return Address never change
1786 case TypeFunc::FramePtr: // Drop m, use the original value
1787 case TypeFunc::ReturnAdr:
1788 break;
1789 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1790 assert(phi == nullptr, "the merge contains phis, not vice versa");
1791 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1792 continue;
1793 default: // All normal stuff
1794 if (phi == nullptr) {
1795 const JVMState* jvms = map()->jvms();
1796 if (EliminateNestedLocks &&
1797 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1798 // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1799 // Use old BoxLock node as merged box.
1800 assert(newin->jvms()->is_monitor_box(j), "sanity");
1801 // This assert also tests that nodes are BoxLock.
1802 assert(BoxLockNode::same_slot(n, m), "sanity");
1809 // Incremental Inlining before EA and Macro nodes elimination.
1810 //
1811 // Incremental Inlining is executed after IGVN optimizations
1812 // during which BoxLock can be marked as Coarsened.
1813 old_box->set_coarsened(); // Verifies state
1814 old_box->set_unbalanced();
1815 }
1816 C->gvn_replace_by(n, m);
1817 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1818 phi = ensure_phi(j, nophi);
1819 }
1820 }
1821 break;
1822 }
1823 }
1824 // At this point, n might be top if:
1825 // - there is no phi (because TypeFlow detected a conflict), or
1826 // - the corresponding control edge is top (a dead incoming path)
1827 // It is a bug if we create a phi which sees a garbage value on a live path.
1828
1829 if (phi != nullptr) {
1830 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1831 assert(phi->region() == r, "");
1832 phi->set_req(pnum, n); // Then add 'n' to the merge
1833 if (pnum == PhiNode::Input) {
1834 // Last merge for this Phi.
1835 // So far, Phis have had a reasonable type from ciTypeFlow.
1836 // Now _gvn will join that with the meet of current inputs.
1837 // BOTTOM is never permissible here, because pessimistically
1838 // Phis of pointers cannot lose the basic pointer type.
1839 debug_only(const Type* bt1 = phi->bottom_type());
1840 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1841 map()->set_req(j, _gvn.transform(phi));
1842 debug_only(const Type* bt2 = phi->bottom_type());
1843 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1844 record_for_igvn(phi);
1845 }
1846 }
1847 } // End of for all values to be merged
1848
1849 if (pnum == PhiNode::Input &&
1850 !r->in(0)) { // The occasional useless Region
1851 assert(control() == r, "");
1852 set_control(r->nonnull_req());
1853 }
1854
1855 map()->merge_replaced_nodes_with(newin);
1856
1857 // newin has been subsumed into the lazy merge, and is now dead.
1858 set_block(save_block);
1859
1860 stop(); // done with this guy, for now
1861 }
1862
1863 if (TraceOptoParse) {
1864 tty->print_cr(" on path %d", pnum);
1865 }
1866
1867 // Done with this parser state.
1868 assert(stopped(), "");
1869 }
1870
1982
1983 // Add new path to the region.
1984 uint pnum = r->req();
1985 r->add_req(nullptr);
1986
1987 for (uint i = 1; i < map->req(); i++) {
1988 Node* n = map->in(i);
1989 if (i == TypeFunc::Memory) {
1990 // Ensure a phi on all currently known memories.
1991 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1992 Node* phi = mms.memory();
1993 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
1994 assert(phi->req() == pnum, "must be same size as region");
1995 phi->add_req(nullptr);
1996 }
1997 }
1998 } else {
1999 if (n->is_Phi() && n->as_Phi()->region() == r) {
2000 assert(n->req() == pnum, "must be same size as region");
2001 n->add_req(nullptr);
2002 }
2003 }
2004 }
2005
2006 return pnum;
2007 }
2008
2009 //------------------------------ensure_phi-------------------------------------
2010 // Turn the idx'th entry of the current map into a Phi
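// Returns nullptr when the entry is TOP, or when its type degenerates to
// TOP/HALF/BOTTOM (the slot is then forced to top and goes dead); returns the
// existing Phi if one is already installed on this region; otherwise builds a
// fresh PhiNode typed from the ciTypeFlow information.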
2011 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2012 SafePointNode* map = this->map();
2013 Node* region = map->control();
2014 assert(region->is_Region(), "");
2015
2016 Node* o = map->in(idx);
2017 assert(o != nullptr, "");
2018
2019 if (o == top()) return nullptr; // TOP always merges into TOP
2020
2021 if (o->is_Phi() && o->as_Phi()->region() == region) {
2022 return o->as_Phi();
2023 }
2024
2025 // Now use a Phi here for merging
2026 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2027 const JVMState* jvms = map->jvms();
2028 const Type* t = nullptr;
2029 if (jvms->is_loc(idx)) {
2030 t = block()->local_type_at(idx - jvms->locoff());
2031 } else if (jvms->is_stk(idx)) {
2032 t = block()->stack_type_at(idx - jvms->stkoff());
2033 } else if (jvms->is_mon(idx)) {
2034 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2035 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2036 } else if ((uint)idx < TypeFunc::Parms) {
2037 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2038 } else {
2039 assert(false, "no type information for this phi");
2040 }
2041
2042 // If the type falls to bottom, then this must be a local that
2043 // is mixing ints and oops or some such. Forcing it to top
2044 // makes it go dead.
2045 if (t == Type::BOTTOM) {
2046 map->set_req(idx, top());
2047 return nullptr;
2048 }
2049
2050 // Do not create phis for top either.
2051 // A top on a live (non-dead) control path must be unused, even after the phi.
2052 if (t == Type::TOP || t == Type::HALF) {
2053 map->set_req(idx, top());
2054 return nullptr;
2055 }
2056
2057 PhiNode* phi = PhiNode::make(region, o, t);
2058 gvn().set_type(phi, t);
2059 if (C->do_escape_analysis()) record_for_igvn(phi);
2060 map->set_req(idx, phi);
2061 return phi;
2062 }
2063
2064 //--------------------------ensure_memory_phi----------------------------------
2065 // Turn the idx'th slice of the current memory into a Phi
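// Descriptive note: when the requested slice still refers to the shared
// base-memory phi, that phi is cloned below so alias class idx gets a private
// memory split (the remainder of the split logic is elided from this excerpt).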
2066 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2067 MergeMemNode* mem = merged_memory();
2068 Node* region = control();
2069 assert(region->is_Region(), "");
2070
2071 Node* o = (idx == Compile::AliasIdxBot) ? mem->base_memory() : mem->memory_at(idx);
2072 assert(o != nullptr && o != top(), "");
2073
2074 PhiNode* phi;
2075 if (o->is_Phi() && o->as_Phi()->region() == region) {
2076 phi = o->as_Phi();
2077 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2078 // clone the shared base memory phi to make a new memory split
2079 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2080 const Type* t = phi->bottom_type();
2081 const TypePtr* adr_type = C->get_adr_type(idx);
2171 // Add check to deoptimize once holder klass is fully initialized.
2172 void Parse::clinit_deopt() {
2173 assert(C->has_method(), "only for normal compilations");
2174 assert(depth() == 1, "only for main compiled method");
2175 assert(is_normal_parse(), "no barrier needed on osr entry");
2176 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2177
2178 set_parse_bci(0);
2179
2180 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2181 guard_klass_being_initialized(holder);
2182 }
2183
2184 //------------------------------return_current---------------------------------
2185 // Append current _map to _exit_return
2186 void Parse::return_current(Node* value) {
2187 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2188 call_register_finalizer();
2189 }
2190
2191 // Do not set_parse_bci, so that return goo is credited to the return insn.
2192 set_bci(InvocationEntryBci);
2193 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2194 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2195 }
2196 if (C->env()->dtrace_method_probes()) {
2197 make_dtrace_method_exit(method());
2198 }
2199 SafePointNode* exit_return = _exits.map();
2200 exit_return->in( TypeFunc::Control )->add_req( control() );
2201 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2202 Node *mem = exit_return->in( TypeFunc::Memory );
2203 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2204 if (mms.is_empty()) {
2205 // get a copy of the base memory, and patch just this one input
2206 const TypePtr* adr_type = mms.adr_type(C);
2207 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2208 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2209 gvn().set_type_bottom(phi);
2210 phi->del_req(phi->req()-1); // prepare to re-patch
2211 mms.set_memory(phi);
2212 }
2213 mms.memory()->add_req(mms.memory2());
2214 }
2215
2216 // frame pointer is always same, already captured
2217 if (value != nullptr) {
2218 // If returning oops to an interface-return, there is a silent free
2219 // cast from oop to interface allowed by the Verifier. Make it explicit
2220 // here.
2221 Node* phi = _exits.argument(0);
2222 phi->add_req(value);
2223 }
2224
2225 if (_first_return) {
2226 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2227 _first_return = false;
2228 } else {
2229 _exits.map()->merge_replaced_nodes_with(map());
2230 }
2231
2232 stop_and_kill_map(); // This CFG path dies here
2233 }
2234
2235
2236 //------------------------------add_safepoint----------------------------------
2237 void Parse::add_safepoint() {
2238 uint parms = TypeFunc::Parms+1;
2239
2240 // Clear out dead values from the debug info.
2241 kill_dead_locals();
2242
2243 // Clone the JVM State
2244 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "interpreter/linkResolver.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/method.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/c2compiler.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/convertnode.hpp"
34 #include "opto/idealGraphPrinter.hpp"
35 #include "opto/inlinetypenode.hpp"
36 #include "opto/locknode.hpp"
37 #include "opto/memnode.hpp"
38 #include "opto/opaquenode.hpp"
39 #include "opto/parse.hpp"
40 #include "opto/rootnode.hpp"
41 #include "opto/runtime.hpp"
42 #include "opto/type.hpp"
43 #include "runtime/handles.inline.hpp"
44 #include "runtime/safepointMechanism.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/bitMap.inline.hpp"
47 #include "utilities/copy.hpp"
48
49 // Static array so we can figure out which bytecodes most often stop us from
50 // compiling. Some of the non-static variables are needed in bytecodeInfo.cpp
51 // and eventually should be encapsulated in a proper class (gri 8/18/98).
52
53 #ifndef PRODUCT
54 uint nodes_created = 0;
55 uint methods_parsed = 0;
87 }
88 if (all_null_checks_found) {
89 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
90 (100*implicit_null_checks)/all_null_checks_found);
91 }
92 if (SharedRuntime::_implicit_null_throws) {
93 tty->print_cr("%u implicit null exceptions at runtime",
94 SharedRuntime::_implicit_null_throws);
95 }
96
97 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
98 BytecodeParseHistogram::print();
99 }
100 }
101 #endif
102
103 //------------------------------ON STACK REPLACEMENT---------------------------
104
105 // Construct a node which can be used to get incoming state for
106 // on stack replacement.
107 Node* Parse::fetch_interpreter_state(int index,
108 const Type* type,
109 Node* local_addrs,
110 Node* local_addrs_base) {
111 BasicType bt = type->basic_type();
112 if (type == TypePtr::NULL_PTR) {
113 // Ptr types are mixed together with T_ADDRESS, but nullptr is
114 // really for T_OBJECT types, so correct it.
115 bt = T_OBJECT;
116 }
117 Node *mem = memory(Compile::AliasIdxRaw);
118 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
119 Node *ctl = control();
120
121 // Very similar to LoadNode::make, except we handle unaligned longs and
122 // doubles on SPARC. Intel can handle them just fine directly.
123 Node *l = nullptr;
124 switch (bt) { // Signature is flattened
125 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
126 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
127 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
128 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
129 case T_LONG:
130 case T_DOUBLE: {
131 // Since arguments are in reverse order, the argument address 'adr'
132 // refers to the back half of the long/double. Recompute adr.
133 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
134 if (Matcher::misaligned_doubles_ok) {
135 l = (bt == T_DOUBLE)
136 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
137 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
138 } else {
139 l = (bt == T_DOUBLE)
140 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
141 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
142 }
143 break;
144 }
145 default: ShouldNotReachHere();
146 }
147 return _gvn.transform(l);
148 }
149
150 // Helper routine to prevent the interpreter from handing
151 // unexpected typestate to an OSR method.
152 // The Node l is a value newly dug out of the interpreter frame.
153 // The type is the type predicted by ciTypeFlow. Note that it is
154 // not a general type, but can only come from Type::get_typeflow_type.
155 // The safepoint is a map which will feed an uncommon trap.
156 Node* Parse::check_interpreter_type(Node* l, const Type* type,
157 SafePointNode* &bad_type_exit) {
158 const TypeOopPtr* tp = type->isa_oopptr();
159
160 // TypeFlow may assert null-ness if a type appears unloaded.
161 if (type == TypePtr::NULL_PTR ||
162 (tp != nullptr && !tp->is_loaded())) {
163 // Value must be null, not a real oop.
164 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
165 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
166 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
167 set_control(_gvn.transform( new IfTrueNode(iff) ));
168 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
169 bad_type_exit->control()->add_req(bad_type);
170 l = null();
171 }
172
173 // Typeflow can also cut off paths from the CFG, based on
174 // types which appear unloaded, or call sites which appear unlinked.
175 // When paths are cut off, values at later merge points can rise
176 // toward more specific classes. Make sure these specific classes
177 // are still in effect.
178 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
179 // TypeFlow asserted a specific object type. Value must have that type.
180 Node* bad_type_ctrl = nullptr;
181 if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
182 // Check inline types for null here to prevent checkcast from adding an
183 // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
184 l = null_check_oop(l, &bad_type_ctrl);
185 bad_type_exit->control()->add_req(bad_type_ctrl);
186 }
187 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
188 bad_type_exit->control()->add_req(bad_type_ctrl);
189 }
190
191 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
192 return l;
193 }
194
195 // Helper routine which sets up elements of the initial parser map when
196 // performing a parse for on stack replacement. Add values into map.
197 // The only parameter contains the address of the interpreter arguments.
198 void Parse::load_interpreter_state(Node* osr_buf) {
199 int index;
200 int max_locals = jvms()->loc_size();
201 int max_stack = jvms()->stk_size();
202
203 // Mismatch between method and jvms can occur since map briefly held
204 // an OSR entry state (which takes up one RawPtr word).
205 assert(max_locals == method()->max_locals(), "sanity");
206 assert(max_stack >= method()->max_stack(), "sanity");
207 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
208 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
209
210 // Find the start block.
211 Block* osr_block = start_block();
212 assert(osr_block->start() == osr_bci(), "sanity");
213
214 // Set initial BCI.
215 set_parse_bci(osr_block->start());
216
217 // Set initial stack depth.
218 set_sp(osr_block->start_sp());
219
220 // Check bailouts. We currently do not perform on stack replacement
221 // of loops in catch blocks or loops which branch with a non-empty stack.
222 if (sp() != 0) {
237 for (index = 0; index < mcnt; index++) {
238 // Make a BoxLockNode for the monitor.
239 BoxLockNode* osr_box = new BoxLockNode(next_monitor());
240 // Check for bailout after new BoxLockNode
241 if (failing()) { return; }
242
243 // This OSR locking region is unbalanced because it does not have a Lock node:
244 // locking was done in the interpreter.
245 // This is similar to the Coarsened case, when a Lock node is eliminated
246 // and as a result the region is marked as Unbalanced.
247
248 // Emulate Coarsened state transition from Regular to Unbalanced.
249 osr_box->set_coarsened();
250 osr_box->set_unbalanced();
251
252 Node* box = _gvn.transform(osr_box);
253
254 // Displaced headers and locked objects are interleaved in the
255 // temp OSR buffer. We only copy the locked objects out here.
256 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
257 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
258 // Try to copy the displaced header to the BoxNode
259 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
260
261 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
262
263 // Build a bogus FastLockNode (no code will be generated) and push the
264 // monitor into our debug info.
265 const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
266 map()->push_monitor(flock);
267
268 // If the lock is our method synchronization lock, tuck it away in
269 // _sync_lock for return and rethrow exit paths.
270 if (index == 0 && method()->is_synchronized()) {
271 _synch_lock = flock;
272 }
273 }
274
275 // Use the raw liveness computation to make sure that unexpected
276 // values don't propagate into the OSR frame.
277 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
278 if (!live_locals.is_valid()) {
279 // Degenerate or breakpointed method.
307 if (C->log() != nullptr) {
308 C->log()->elem("OSR_mismatch local_index='%d'",index);
309 }
310 set_local(index, null());
311 // and ignore it for the loads
312 continue;
313 }
314 }
315
316 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
317 if (type == Type::TOP || type == Type::HALF) {
318 continue;
319 }
320 // If the type falls to bottom, then this must be a local that
321 // is mixing ints and oops or some such. Forcing it to top
322 // makes it go dead.
323 if (type == Type::BOTTOM) {
324 continue;
325 }
326 // Construct code to access the appropriate local.
327 Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
328 set_local(index, value);
329 }
330
331 // Extract the needed stack entries from the interpreter frame.
332 for (index = 0; index < sp(); index++) {
333 const Type *type = osr_block->stack_type_at(index);
334 if (type != Type::TOP) {
335 // Currently the compiler bails out when attempting to on stack replace
336 // at a bci with a non-empty stack. We should not reach here.
337 ShouldNotReachHere();
338 }
339 }
340
341 // End the OSR migration
342 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
343 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
344 "OSR_migration_end", TypeRawPtr::BOTTOM,
345 osr_buf);
346
347 // Now that the interpreter state is loaded, make sure it will match
507 // either breakpoint setting or hotswapping of methods may
508 // cause deoptimization.
509 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
510 C->dependencies()->assert_evol_method(method());
511 }
512
513 NOT_PRODUCT(methods_seen++);
514
515 // Do some special top-level things.
516 if (depth() == 1 && C->is_osr_compilation()) {
517 _tf = C->tf(); // the OSR entry type is different
518 _entry_bci = C->entry_bci();
519 _flow = method()->get_osr_flow_analysis(osr_bci());
520 } else {
521 _tf = TypeFunc::make(method());
522 _entry_bci = InvocationEntryBci;
523 _flow = method()->get_flow_analysis();
524 }
525
526 if (_flow->failing()) {
527 // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
528 // can lead to this. Re-enable once 8284443 is fixed.
529 //assert(false, "type flow analysis failed during parsing");
530 C->record_method_not_compilable(_flow->failure_reason());
531 #ifndef PRODUCT
532 if (PrintOpto && (Verbose || WizardMode)) {
533 if (is_osr_parse()) {
534 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
535 } else {
536 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
537 }
538 if (Verbose) {
539 method()->print();
540 method()->print_codes();
541 _flow->print();
542 }
543 }
544 #endif
545 }
546
547 #ifdef ASSERT
548 if (depth() == 1) {
549 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
600 load_interpreter_state(osr_buf);
601 } else {
602 set_map(entry_map);
603 do_method_entry();
604 }
605
606 if (depth() == 1 && !failing()) {
607 if (C->clinit_barrier_on_entry()) {
608 // Add check to deoptimize the nmethod once the holder class is fully initialized
609 clinit_deopt();
610 }
611 }
612
613 // Check for bailouts during method entry.
614 if (failing()) {
615 if (log) log->done("parse");
616 C->set_default_node_notes(caller_nn);
617 return;
618 }
619
620 // Handle inline type arguments
621 int arg_size = method()->arg_size();
622 for (int i = 0; i < arg_size; i++) {
623 Node* parm = local(i);
624 const Type* t = _gvn.type(parm);
625 if (t->is_inlinetypeptr()) {
626 // Create InlineTypeNode from the oop and replace the parameter
627 bool is_larval = (i == 0) && method()->is_object_constructor() && !method()->holder()->is_java_lang_Object();
628 Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass(), !t->maybe_null(), is_larval);
629 replace_in_map(parm, vt);
630 } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && method()->has_vararg() &&
631 t->isa_aryptr() != nullptr && !t->is_aryptr()->is_null_free() && !t->is_aryptr()->is_not_null_free()) {
632 // Speculate on varargs Object array being not null-free (and therefore also not flat)
633 const TypePtr* spec_type = t->speculative();
634 spec_type = (spec_type != nullptr && spec_type->isa_aryptr() != nullptr) ? spec_type : t->is_aryptr();
635 spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free();
636 spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
637 Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
638 replace_in_map(parm, cast);
639 }
640 }
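// Descriptive note (assumptions flagged): InlineTypeNode::make_from_oop is
// taken here to scalarize the argument so later bytecodes see per-field
// values, while the varargs branch only installs a speculative CheckCastPP
// that narrows the array type; neither path changes the JVMS layout of the map.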
641
642 entry_map = map(); // capture any changes performed by method setup code
643 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
644
645 // We begin parsing as if we have just encountered a jump to the
646 // method entry.
647 Block* entry_block = start_block();
648 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
649 set_map_clone(entry_map);
650 merge_common(entry_block, entry_block->next_path_num());
651
652 #ifndef PRODUCT
653 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
654 set_parse_histogram( parse_histogram_obj );
655 #endif
656
657 // Parse all the basic blocks.
658 do_all_blocks();
659
660 // Check for bailouts during conversion to graph
661 if (failing()) {
807 void Parse::build_exits() {
808 // make a clone of caller to prevent sharing of side-effects
809 _exits.set_map(_exits.clone_map());
810 _exits.clean_stack(_exits.sp());
811 _exits.sync_jvms();
812
813 RegionNode* region = new RegionNode(1);
814 record_for_igvn(region);
815 gvn().set_type_bottom(region);
816 _exits.set_control(region);
817
818 // Note: iophi and memphi are not transformed until do_exits.
819 Node* iophi = new PhiNode(region, Type::ABIO);
820 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
821 gvn().set_type_bottom(iophi);
822 gvn().set_type_bottom(memphi);
823 _exits.set_i_o(iophi);
824 _exits.set_all_memory(memphi);
825
826 // Add a return value to the exit state. (Do not push it yet.)
827 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
828 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
829 if (ret_type->isa_int()) {
830 BasicType ret_bt = method()->return_type()->basic_type();
831 if (ret_bt == T_BOOLEAN ||
832 ret_bt == T_CHAR ||
833 ret_bt == T_BYTE ||
834 ret_bt == T_SHORT) {
835 ret_type = TypeInt::INT;
836 }
837 }
838
839 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
840 // becomes loaded during the subsequent parsing, the loaded and unloaded
841 // types will not join when we transform and push in do_exits().
842 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
843 if (ret_oop_type && !ret_oop_type->is_loaded()) {
844 ret_type = TypeOopPtr::BOTTOM;
845 }
846 int ret_size = type2size[ret_type->basic_type()];
847 Node* ret_phi = new PhiNode(region, ret_type);
848 gvn().set_type_bottom(ret_phi);
849 _exits.ensure_stack(ret_size);
850 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
851 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
852 _exits.set_argument(0, ret_phi); // here is where the parser finds it
853 // Note: ret_phi is not yet pushed, until do_exits.
854 }
855 }
856
857 //----------------------------build_start_state-------------------------------
858 // Construct a state which contains only the incoming arguments from an
859 // unknown caller. The method & bci will be null & InvocationEntryBci.
860 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
861 int arg_size = tf->domain_sig()->cnt();
862 int max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
863 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
864 SafePointNode* map = new SafePointNode(max_size, jvms);
865 jvms->set_map(map);
866 record_for_igvn(map);
867 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
868 Node_Notes* old_nn = default_node_notes();
869 if (old_nn != nullptr && has_method()) {
870 Node_Notes* entry_nn = old_nn->clone(this);
871 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
872 entry_jvms->set_offsets(0);
873 entry_jvms->set_bci(entry_bci());
874 entry_nn->set_jvms(entry_jvms);
875 set_default_node_notes(entry_nn);
876 }
877 PhaseGVN& gvn = *initial_gvn();
878 uint i = 0;
879 int arg_num = 0;
880 for (uint j = 0; i < (uint)arg_size; i++) {
881 const Type* t = tf->domain_sig()->field_at(i);
882 Node* parm = nullptr;
883 if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
884 // Inline type arguments are not passed by reference: we get an argument per
885 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
886 GraphKit kit(jvms, &gvn);
887 kit.set_control(map->control());
888 Node* old_mem = map->memory();
889 // Use immutable memory for inline type loads and restore it below
890 kit.set_all_memory(C->immutable_memory());
891 parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
892 map->set_control(kit.control());
893 map->set_memory(old_mem);
894 } else {
895 parm = gvn.transform(new ParmNode(start, j++));
896 }
897 map->init_req(i, parm);
898 // Record all these guys for later GVN.
899 record_for_igvn(parm);
900 if (i >= TypeFunc::Parms && t != Type::HALF) {
901 arg_num++;
902 }
903 }
904 for (; i < map->req(); i++) {
905 map->init_req(i, top());
906 }
907 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
908 set_default_node_notes(old_nn);
909 return jvms;
910 }
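// Sketch (hypothetical example): for 'static int area(Point p)' with a
// scalarized calling convention, domain_sig() still lists 'p' as a single
// argument, but the caller passes each field separately; make_from_multi()
// above gathers those per-field projections off the StartNode into one
// InlineTypeNode that fills the single map slot for 'p'.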
911
912 //-----------------------------make_node_notes---------------------------------
913 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
914 if (caller_nn == nullptr) return nullptr;
915 Node_Notes* nn = caller_nn->clone(C);
916 JVMState* caller_jvms = nn->jvms();
917 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
918 jvms->set_offsets(0);
919 jvms->set_bci(_entry_bci);
920 nn->set_jvms(jvms);
921 return nn;
922 }
923
924
925 //--------------------------return_values--------------------------------------
926 void Compile::return_values(JVMState* jvms) {
927 GraphKit kit(jvms);
928 Node* ret = new ReturnNode(TypeFunc::Parms,
929 kit.control(),
930 kit.i_o(),
931 kit.reset_memory(),
932 kit.frameptr(),
933 kit.returnadr());
934     // Add zero or one return value
935 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
936 if (ret_size > 0) {
937 kit.inc_sp(-ret_size); // pop the return value(s)
938 kit.sync_jvms();
939 Node* res = kit.argument(0);
940 if (tf()->returns_inline_type_as_fields()) {
941 // Multiple return values (inline type fields): add as many edges
942 // to the Return node as returned values.
943 InlineTypeNode* vt = res->as_InlineType();
944 ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
945 if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
946 ret->init_req(TypeFunc::Parms, vt);
947 } else {
948 // Return the tagged klass pointer to signal scalarization to the caller
949 Node* tagged_klass = vt->tagged_klass(kit.gvn());
950 // Return null if the inline type is null (IsInit field is not set)
951 Node* conv = kit.gvn().transform(new ConvI2LNode(vt->get_is_init()));
952 Node* shl = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
953 Node* shr = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
954 tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
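        // The two shifts above sign-extend IsInit (0 or 1) into an all-zeros
        // or all-ones 64-bit mask, i.e. ((long)is_init << 63) >> 63, so the
        // AndL yields the tagged klass when IsInit is set and null (0) when not.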
955 ret->init_req(TypeFunc::Parms, tagged_klass);
956 }
957 uint idx = TypeFunc::Parms + 1;
958 vt->pass_fields(&kit, ret, idx, false, false);
959 } else {
960 ret->add_req(res);
961 // Note: The second dummy edge is not needed by a ReturnNode.
962 }
963 }
964 // bind it to root
965 root()->add_req(ret);
966 record_for_igvn(ret);
967 initial_gvn()->transform(ret);
968 }
969
970 //------------------------rethrow_exceptions-----------------------------------
971 // Bind all exception states in the list into a single RethrowNode.
972 void Compile::rethrow_exceptions(JVMState* jvms) {
973 GraphKit kit(jvms);
974 if (!kit.has_exceptions()) return; // nothing to generate
975 // Load my combined exception state into the kit, with all phis transformed:
976 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
977 Node* ex_oop = kit.use_exception_state(ex_map);
978 RethrowNode* exit = new RethrowNode(kit.control(),
979 kit.i_o(), kit.reset_memory(),
980 kit.frameptr(), kit.returnadr(),
981 // like a return but with exception input
982 ex_oop);
1066 // to complete, we force all writes to complete.
1067 //
1068 // 2. An experimental VM option (AlwaysSafeConstructors) is used to force
1069 //    the barrier if any field was written in the constructor.
1070 //
1071 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1072 //    support_IRIW_for_not_multiple_copy_atomic_cpu arranges for
1073 //    MemBarVolatile to be emitted before a volatile load instead of after a
1074 //    volatile store, so there is no barrier after the store.
1075 // We want to guarantee the same behavior as on platforms with total store
1076 // order, although this is not required by the Java memory model.
1077 // In this case, we want to enforce visibility of volatile field
1078 // initializations which are performed in constructors.
1079 // So as with finals, we add a barrier here.
1080 //
1081 // "All bets are off" unless the first publication occurs after a
1082 // normal return from the constructor. We do not attempt to detect
1083 // such unusual early publications. But no barrier is needed on
1084 // exceptional returns, since they cannot publish normally.
1085 //
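// Sketch of the pattern being protected (hypothetical example):
//
//   class Holder { final int x; Holder() { x = 42; } }
//   // writer thread:  shared = new Holder();
//   // reader thread:  Holder h = shared; if (h != null) use(h.x);
//
// The barrier emitted below keeps the store to 'x' from floating past the
// publication of the new Holder on the writer side.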
1086 if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
1087 (wrote_final() || wrote_stable() ||
1088 (AlwaysSafeConstructors && wrote_fields()) ||
1089 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1090 Node* recorded_alloc = alloc_with_final_or_stable();
1091 _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1092 recorded_alloc);
1093
1094   // If a memory barrier was created for the final field writes
1095   // and the allocation node does not escape the initializing method,
1096   // then the barrier introduced by the allocation node can be removed.
1097 if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1098 AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1099 alloc->compute_MemBar_redundancy(method());
1100 }
1101 if (PrintOpto && (Verbose || WizardMode)) {
1102 method()->print_name();
1103 tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1104 }
1105 }
1106
1107 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1108 // transform each slice of the original memphi:
1109 mms.set_memory(_gvn.transform(mms.memory()));
1110 }
1111 // Clean up input MergeMems created by transforming the slices
1112 _gvn.transform(_exits.merged_memory());
1113
1114 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1115 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1116 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1117 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1118 // If the type we set for the ret_phi in build_exits() is too optimistic and
1119 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1120 // loading. It could also be due to an error, so mark this method as not compilable because
1121 // otherwise this could lead to an infinite compile loop.
1122 // In any case, this code path is rarely (and never in my testing) reached.
1123 C->record_method_not_compilable("Can't determine return type.");
1124 return;
1125 }
1126 if (ret_type->isa_int()) {
1127 BasicType ret_bt = method()->return_type()->basic_type();
1128 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1129 }
1130 _exits.push_node(ret_type->basic_type(), ret_phi);
1131 }
1132
1133 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1134
1135 // Unlock along the exceptional paths.
1189
1190 //-----------------------------create_entry_map-------------------------------
1191 // Initialize our parser map to contain the types at method entry.
1192 // For OSR, the map contains a single RawPtr parameter.
1193 // Initial monitor locking for sync. methods is performed by do_method_entry.
1194 SafePointNode* Parse::create_entry_map() {
1195 // Check for really stupid bail-out cases.
1196 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1197 if (len >= 32760) {
1198 // Bailout expected, this is a very rare edge case.
1199 C->record_method_not_compilable("too many local variables");
1200 return nullptr;
1201 }
1202
1203 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1204 _caller->map()->delete_replaced_nodes();
1205
1206 // If this is an inlined method, we may have to do a receiver null check.
1207 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1208 GraphKit kit(_caller);
1209 Node* receiver = kit.argument(0);
1210 Node* null_free = kit.null_check_receiver_before_call(method());
1211 _caller = kit.transfer_exceptions_into_jvms();
1212 if (receiver->is_InlineType() && receiver->as_InlineType()->is_larval()) {
1213 // Replace the larval inline type receiver in the exit map as well to make sure that
1214 // we can find and update it in Parse::do_call when we are done with the initialization.
1215 _exits.map()->replace_edge(receiver, null_free);
1216 }
1217 if (kit.stopped()) {
1218 _exits.add_exception_states_from(_caller);
1219 _exits.set_jvms(_caller);
1220 return nullptr;
1221 }
1222 }
1223
1224 assert(method() != nullptr, "parser must have a method");
1225
1226 // Create an initial safepoint to hold JVM state during parsing
1227 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1228 set_map(new SafePointNode(len, jvms));
1229 jvms->set_map(map());
1230 record_for_igvn(map());
1231 assert(jvms->endoff() == len, "correct jvms sizing");
1232
1233 SafePointNode* inmap = _caller->map();
1234 assert(inmap != nullptr, "must have inmap");
1235 // In case of null check on receiver above
1236 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1237
1238 uint i;
1239
1240 // Pass thru the predefined input parameters.
1241 for (i = 0; i < TypeFunc::Parms; i++) {
1242 map()->init_req(i, inmap->in(i));
1243 }
1244
1245 if (depth() == 1) {
1246 assert(map()->memory()->Opcode() == Op_Parm, "");
1247 // Insert the memory aliasing node
1248 set_all_memory(reset_memory());
1249 }
1250 assert(merged_memory(), "");
1251
1252 // Now add the locals which are initially bound to arguments:
1253 uint arg_size = tf()->domain_sig()->cnt();
1254 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1255 for (i = TypeFunc::Parms; i < arg_size; i++) {
1256 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1257 }
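  // Resulting map layout (sketch): slots [0, TypeFunc::Parms) hold the
  // predefined inputs (control, i/o, memory, frame pointer, return address);
  // the next max_locals() slots are the locals, with argument values bound
  // first; the remaining max_stack() slots form the expression stack. The
  // loop below clears every slot past the arguments to top().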
1258
1259 // Clear out the rest of the map (locals and stack)
1260 for (i = arg_size; i < len; i++) {
1261 map()->init_req(i, top());
1262 }
1263
1264 SafePointNode* entry_map = stop();
1265 return entry_map;
1266 }
1267
1268 //-----------------------------do_method_entry--------------------------------
1269 // Emit any code needed in the pseudo-block before BCI zero.
1270 // The main thing to do is lock the receiver of a synchronized method.
1271 void Parse::do_method_entry() {
1272 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1273 set_sp(0); // Java Stack Pointer
1307
1308 // If the method is synchronized, we need to construct a lock node, attach
1309 // it to the Start node, and pin it there.
1310 if (method()->is_synchronized()) {
1311 // Insert a FastLockNode right after the Start which takes as arguments
1312 // the current thread pointer, the "this" pointer & the address of the
1313 // stack slot pair used for the lock. The "this" pointer is a projection
1314 // off the start node, but the locking spot has to be constructed by
1315 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1316 // becomes the second argument to the FastLockNode call. The
1317 // FastLockNode becomes the new control parent to pin it to the start.
1318
1319 // Setup Object Pointer
1320 Node *lock_obj = nullptr;
1321 if (method()->is_static()) {
1322 ciInstance* mirror = _method->holder()->java_mirror();
1323 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1324 lock_obj = makecon(t_lock);
1325 } else { // Else pass the "this" pointer,
1326 lock_obj = local(0); // which is Parm0 from StartNode
1327 assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1328 }
1329 // Clear out dead values from the debug info.
1330 kill_dead_locals();
1331 // Build the FastLockNode
1332 _synch_lock = shared_lock(lock_obj);
1333 // Check for bailout in shared_lock
1334 if (failing()) { return; }
1335 }
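  // E.g. (sketch): for 'synchronized int f()' the lock object is the incoming
  // receiver (local 0, Parm0 from the StartNode); for 'static synchronized
  // int f()' it is the constant java mirror of the holder class created above.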
1336
1337 // Feed profiling data for parameters to the type system so it can
1338 // propagate it as speculative types
1339 record_profiled_parameters_for_speculation();
1340 }
1341
1342 //------------------------------init_blocks------------------------------------
1343 // Initialize our parser map to contain the types/monitors at method entry.
1344 void Parse::init_blocks() {
1345 // Create the blocks.
1346 _block_count = flow()->block_count();
1347 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1743 //--------------------handle_missing_successor---------------------------------
1744 void Parse::handle_missing_successor(int target_bci) {
1745 #ifndef PRODUCT
1746 Block* b = block();
1747 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1748 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1749 #endif
1750 ShouldNotReachHere();
1751 }
1752
1753 //--------------------------merge_common---------------------------------------
1754 void Parse::merge_common(Parse::Block* target, int pnum) {
1755 if (TraceOptoParse) {
1756 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1757 }
1758
1759 // Zap extra stack slots to top
1760 assert(sp() == target->start_sp(), "");
1761 clean_stack(sp());
1762
1763 // Check for merge conflicts involving inline types
1764 JVMState* old_jvms = map()->jvms();
1765 int old_bci = bci();
1766 JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1767 tmp_jvms->set_should_reexecute(true);
1768 tmp_jvms->bind_map(map());
1769   // Execution needs to restart at the next bytecode (the entry of the
1770   // next block)
1771 if (target->is_merged() ||
1772 pnum > PhiNode::Input ||
1773 target->is_handler() ||
1774 target->is_loop_head()) {
1775 set_parse_bci(target->start());
1776 for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1777 Node* n = map()->in(j); // Incoming change to target state.
1778 const Type* t = nullptr;
1779 if (tmp_jvms->is_loc(j)) {
1780 t = target->local_type_at(j - tmp_jvms->locoff());
1781 } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1782 t = target->stack_type_at(j - tmp_jvms->stkoff());
1783 }
1784 if (t != nullptr && t != Type::BOTTOM) {
1785 if (n->is_InlineType() && !t->is_inlinetypeptr()) {
1786 // Allocate inline type in src block to be able to merge it with oop in target block
1787 map()->set_req(j, n->as_InlineType()->buffer(this));
1788 } else if (!n->is_InlineType() && t->is_inlinetypeptr()) {
1789 // Scalarize null in src block to be able to merge it with inline type in target block
1790 assert(gvn().type(n)->is_zero_type(), "Should have been scalarized");
1791 map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
1792 }
1793 }
1794 }
1795 }
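  // Sketch (hypothetical example) of the conflict handled above:
  //
  //   Point p = cond ? makePoint() : cachedOop;
  //
  // One predecessor carries an InlineTypeNode, the other a plain oop; the
  // InlineTypeNode is buffered (or the incoming null scalarized) so both
  // paths contribute compatible values to the phis at the merge point.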
1796 old_jvms->bind_map(map());
1797 set_parse_bci(old_bci);
1798
1799 if (!target->is_merged()) { // No prior mapping at this bci
1800 if (TraceOptoParse) { tty->print(" with empty state"); }
1801
1802 // If this path is dead, do not bother capturing it as a merge.
1803 // It is "as if" we had 1 fewer predecessors from the beginning.
1804 if (stopped()) {
1805 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1806 return;
1807 }
1808
1809 // Make a region if we know there are multiple or unpredictable inputs.
1810 // (Also, if this is a plain fall-through, we might see another region,
1811 // which must not be allowed into this block's map.)
1812 if (pnum > PhiNode::Input // Known multiple inputs.
1813 || target->is_handler() // These have unpredictable inputs.
1814 || target->is_loop_head() // Known multiple inputs
1815 || control()->is_Region()) { // We must hide this guy.
1816
1817 int current_bci = bci();
1818 set_parse_bci(target->start()); // Set target bci
1833 record_for_igvn(r);
1834 // zap all inputs to null for debugging (done in Node(uint) constructor)
1835 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1836 r->init_req(pnum, control());
1837 set_control(r);
1838 target->copy_irreducible_status_to(r, jvms());
1839 set_parse_bci(current_bci); // Restore bci
1840 }
1841
1842 // Convert the existing Parser mapping into a mapping at this bci.
1843 store_state_to(target);
1844 assert(target->is_merged(), "do not come here twice");
1845
1846 } else { // Prior mapping at this bci
1847 if (TraceOptoParse) { tty->print(" with previous state"); }
1848 #ifdef ASSERT
1849 if (target->is_SEL_head()) {
1850 target->mark_merged_backedge(block());
1851 }
1852 #endif
1853
1854 // We must not manufacture more phis if the target is already parsed.
1855 bool nophi = target->is_parsed();
1856
1857 SafePointNode* newin = map();// Hang on to incoming mapping
1858 Block* save_block = block(); // Hang on to incoming block;
1859 load_state_from(target); // Get prior mapping
1860
1861 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1862 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1863 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1864 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1865
1866 // Iterate over my current mapping and the old mapping.
1867 // Where different, insert Phi functions.
1868 // Use any existing Phi functions.
1869 assert(control()->is_Region(), "must be merging to a region");
1870 RegionNode* r = control()->as_Region();
1871
1872 // Compute where to merge into
1873 // Merge incoming control path
1874 r->init_req(pnum, newin->control());
1875
1876 if (pnum == 1) { // Last merge for this Region?
1877 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1878 Node* result = _gvn.transform(r);
1879 if (r != result && TraceOptoParse) {
1880 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1881 }
1882 }
1883 record_for_igvn(r);
1884 }
1885
1886 // Update all the non-control inputs to map:
1887 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1888 bool check_elide_phi = target->is_SEL_backedge(save_block);
1889 bool last_merge = (pnum == PhiNode::Input);
1890 for (uint j = 1; j < newin->req(); j++) {
1891 Node* m = map()->in(j); // Current state of target.
1892 Node* n = newin->in(j); // Incoming change to target state.
1893 PhiNode* phi;
1894 if (m->is_Phi() && m->as_Phi()->region() == r) {
1895 phi = m->as_Phi();
1896 } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1897 phi = m->as_InlineType()->get_oop()->as_Phi();
1898 } else {
1899 phi = nullptr;
1900 }
1901 if (m != n) { // Different; must merge
1902 switch (j) {
1903 // Frame pointer and Return Address never changes
1904       case TypeFunc::FramePtr: // Drop m, use the original value
1905 case TypeFunc::ReturnAdr:
1906 break;
1907 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1908 assert(phi == nullptr, "the merge contains phis, not vice versa");
1909 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1910 continue;
1911 default: // All normal stuff
1912 if (phi == nullptr) {
1913 const JVMState* jvms = map()->jvms();
1914 if (EliminateNestedLocks &&
1915 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1916           // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1917 // Use old BoxLock node as merged box.
1918 assert(newin->jvms()->is_monitor_box(j), "sanity");
1919 // This assert also tests that nodes are BoxLock.
1920 assert(BoxLockNode::same_slot(n, m), "sanity");
1927 // Incremental Inlining before EA and Macro nodes elimination.
1928 //
1929 // Incremental Inlining is executed after IGVN optimizations
1930 // during which BoxLock can be marked as Coarsened.
1931 old_box->set_coarsened(); // Verifies state
1932 old_box->set_unbalanced();
1933 }
1934 C->gvn_replace_by(n, m);
1935 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1936 phi = ensure_phi(j, nophi);
1937 }
1938 }
1939 break;
1940 }
1941 }
1942 // At this point, n might be top if:
1943 // - there is no phi (because TypeFlow detected a conflict), or
1944       // - the corresponding control edge is top (a dead incoming path)
1945 // It is a bug if we create a phi which sees a garbage value on a live path.
1946
1947 // Merging two inline types?
1948 if (phi != nullptr && phi->bottom_type()->is_inlinetypeptr()) {
1949 // Reload current state because it may have been updated by ensure_phi
1950 m = map()->in(j);
1951 InlineTypeNode* vtm = m->as_InlineType(); // Current inline type
1952 InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
1953 assert(vtm->get_oop() == phi, "Inline type should have Phi input");
1954 if (TraceOptoParse) {
1955 #ifdef ASSERT
1956 tty->print_cr("\nMerging inline types");
1957 tty->print_cr("Current:");
1958 vtm->dump(2);
1959 tty->print_cr("Incoming:");
1960 vtn->dump(2);
1961 tty->cr();
1962 #endif
1963 }
1964 // Do the merge
1965 vtm->merge_with(&_gvn, vtn, pnum, last_merge);
1966 if (last_merge) {
1967 map()->set_req(j, _gvn.transform(vtm));
1968 record_for_igvn(vtm);
1969 }
1970 } else if (phi != nullptr) {
1971 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1972 assert(phi->region() == r, "");
1973 phi->set_req(pnum, n); // Then add 'n' to the merge
1974 if (last_merge) {
1975 // Last merge for this Phi.
1976 // So far, Phis have had a reasonable type from ciTypeFlow.
1977 // Now _gvn will join that with the meet of current inputs.
1978 // BOTTOM is never permissible here, 'cause pessimistically
1979 // Phis of pointers cannot lose the basic pointer type.
1980 debug_only(const Type* bt1 = phi->bottom_type());
1981 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1982 map()->set_req(j, _gvn.transform(phi));
1983 debug_only(const Type* bt2 = phi->bottom_type());
1984 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1985 record_for_igvn(phi);
1986 }
1987 }
1988 } // End of for all values to be merged
1989
1990 if (last_merge && !r->in(0)) { // The occasional useless Region
1991 assert(control() == r, "");
1992 set_control(r->nonnull_req());
1993 }
1994
1995 map()->merge_replaced_nodes_with(newin);
1996
1997 // newin has been subsumed into the lazy merge, and is now dead.
1998 set_block(save_block);
1999
2000 stop(); // done with this guy, for now
2001 }
2002
2003 if (TraceOptoParse) {
2004 tty->print_cr(" on path %d", pnum);
2005 }
2006
2007 // Done with this parser state.
2008 assert(stopped(), "");
2009 }
2010
2122
2123 // Add new path to the region.
2124 uint pnum = r->req();
2125 r->add_req(nullptr);
2126
2127 for (uint i = 1; i < map->req(); i++) {
2128 Node* n = map->in(i);
2129 if (i == TypeFunc::Memory) {
2130 // Ensure a phi on all currently known memories.
2131 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2132 Node* phi = mms.memory();
2133 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2134 assert(phi->req() == pnum, "must be same size as region");
2135 phi->add_req(nullptr);
2136 }
2137 }
2138 } else {
2139 if (n->is_Phi() && n->as_Phi()->region() == r) {
2140 assert(n->req() == pnum, "must be same size as region");
2141 n->add_req(nullptr);
2142 } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2143 n->as_InlineType()->add_new_path(r);
2144 }
2145 }
2146 }
2147
2148 return pnum;
2149 }
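// Sketch: after this returns, slot 'pnum' of the region and of every phi
// grown above is still the nullptr placeholder added by add_req(); the
// caller is expected to fill it in with the control and data inputs for
// the new predecessor path.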
2150
2151 //------------------------------ensure_phi-------------------------------------
2152 // Turn the idx'th entry of the current map into a Phi
2153 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2154 SafePointNode* map = this->map();
2155 Node* region = map->control();
2156 assert(region->is_Region(), "");
2157
2158 Node* o = map->in(idx);
2159 assert(o != nullptr, "");
2160
2161 if (o == top()) return nullptr; // TOP always merges into TOP
2162
2163 if (o->is_Phi() && o->as_Phi()->region() == region) {
2164 return o->as_Phi();
2165 }
2166 InlineTypeNode* vt = o->isa_InlineType();
2167 if (vt != nullptr && vt->has_phi_inputs(region)) {
2168 return vt->get_oop()->as_Phi();
2169 }
2170
2171 // Now use a Phi here for merging
2172 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2173 const JVMState* jvms = map->jvms();
2174 const Type* t = nullptr;
2175 if (jvms->is_loc(idx)) {
2176 t = block()->local_type_at(idx - jvms->locoff());
2177 } else if (jvms->is_stk(idx)) {
2178 t = block()->stack_type_at(idx - jvms->stkoff());
2179 } else if (jvms->is_mon(idx)) {
2180 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2181 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2182 } else if ((uint)idx < TypeFunc::Parms) {
2183 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2184 } else {
2185 assert(false, "no type information for this phi");
2186 }
2187
2188 // If the type falls to bottom, then this must be a local that
2189 // is already dead or is mixing ints and oops or some such.
2190 // Forcing it to top makes it go dead.
2191 if (t == Type::BOTTOM) {
2192 map->set_req(idx, top());
2193 return nullptr;
2194 }
2195
2196 // Do not create phis for top either.
2197   // A top on a live (non-top) control path must remain unused, even after the phi.
2198 if (t == Type::TOP || t == Type::HALF) {
2199 map->set_req(idx, top());
2200 return nullptr;
2201 }
2202
2203 if (vt != nullptr && t->is_inlinetypeptr()) {
2204 // Inline types are merged by merging their field values.
2205 // Create a cloned InlineTypeNode with phi inputs that
2206 // represents the merged inline type and update the map.
2207 vt = vt->clone_with_phis(&_gvn, region);
2208 map->set_req(idx, vt);
2209 return vt->get_oop()->as_Phi();
2210 } else {
2211 PhiNode* phi = PhiNode::make(region, o, t);
2212 gvn().set_type(phi, t);
2213 if (C->do_escape_analysis()) record_for_igvn(phi);
2214 map->set_req(idx, phi);
2215 return phi;
2216 }
2217 }
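// For instance (sketch): merging two InlineTypeNodes of a value class with
// fields x and y produces one phi per field plus a phi for the buffered oop,
// which is what clone_with_phis() sets up on 'region' above, rather than the
// single pointer phi created in the plain-oop branch.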
2218
2219 //--------------------------ensure_memory_phi----------------------------------
2220 // Turn the idx'th slice of the current memory into a Phi
2221 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2222 MergeMemNode* mem = merged_memory();
2223 Node* region = control();
2224 assert(region->is_Region(), "");
2225
2226 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2227 assert(o != nullptr && o != top(), "");
2228
2229 PhiNode* phi;
2230 if (o->is_Phi() && o->as_Phi()->region() == region) {
2231 phi = o->as_Phi();
2232 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2233 // clone the shared base memory phi to make a new memory split
2234 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2235 const Type* t = phi->bottom_type();
2236 const TypePtr* adr_type = C->get_adr_type(idx);
2326 // Add check to deoptimize once holder klass is fully initialized.
2327 void Parse::clinit_deopt() {
2328 assert(C->has_method(), "only for normal compilations");
2329 assert(depth() == 1, "only for main compiled method");
2330 assert(is_normal_parse(), "no barrier needed on osr entry");
2331 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2332
2333 set_parse_bci(0);
2334
2335 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2336 guard_klass_being_initialized(holder);
2337 }
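// Usage note (sketch): this runs only at bci 0 of a normal root compilation
// (see the asserts above); guard_klass_being_initialized(holder) emits the
// runtime check that triggers the deoptimization described in the header
// comment once the holder class is fully initialized.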
2338
2339 //------------------------------return_current---------------------------------
2340 // Append current _map to _exit_return
2341 void Parse::return_current(Node* value) {
2342 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2343 call_register_finalizer();
2344 }
2345
2346 // frame pointer is always same, already captured
2347 if (value != nullptr) {
2348 Node* phi = _exits.argument(0);
2349 const Type* return_type = phi->bottom_type();
2350 const TypeInstPtr* tr = return_type->isa_instptr();
2351 assert(!value->is_InlineType() || !value->as_InlineType()->is_larval(), "returning a larval");
2352 if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2353 return_type->is_inlinetypeptr()) {
2354 // Inline type is returned as fields, make sure it is scalarized
2355 if (!value->is_InlineType()) {
2356 value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass(), false);
2357 }
2358 if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2359 // Returning from root or an incrementally inlined method. Make sure all non-flat
2360 // fields are buffered and re-execute if allocation triggers deoptimization.
2361 PreserveReexecuteState preexecs(this);
2362 assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2363 jvms()->set_should_reexecute(true);
2364 inc_sp(1);
2365 value = value->as_InlineType()->allocate_fields(this);
2366 }
2367 } else if (value->is_InlineType()) {
2368 // Inline type is returned as oop, make sure it is buffered and re-execute
2369 // if allocation triggers deoptimization.
2370 PreserveReexecuteState preexecs(this);
2371 jvms()->set_should_reexecute(true);
2372 inc_sp(1);
2373 value = value->as_InlineType()->buffer(this);
2374 }
2375 // ...else
2376     // If returning an oop at an interface-typed return point, the Verifier
2377     // allows a silent, free cast from oop to interface. Make it explicit here.
2378 phi->add_req(value);
2379 }
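  // Sketch (hypothetical example): for 'static Point get()' compiled with
  // returns_inline_type_as_fields(), the value is scalarized above so that
  // Compile::return_values() can attach one Return edge per field; when the
  // oop convention is used instead, the value is buffered and the single oop
  // is added to the exit phi.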
2380
2381 // Do not set_parse_bci, so that return goo is credited to the return insn.
2382 set_bci(InvocationEntryBci);
2383 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2384 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2385 }
2386 if (C->env()->dtrace_method_probes()) {
2387 make_dtrace_method_exit(method());
2388 }
2389
2390 SafePointNode* exit_return = _exits.map();
2391 exit_return->in( TypeFunc::Control )->add_req( control() );
2392 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2393 Node *mem = exit_return->in( TypeFunc::Memory );
2394 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2395 if (mms.is_empty()) {
2396 // get a copy of the base memory, and patch just this one input
2397 const TypePtr* adr_type = mms.adr_type(C);
2398 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2399 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2400 gvn().set_type_bottom(phi);
2401 phi->del_req(phi->req()-1); // prepare to re-patch
2402 mms.set_memory(phi);
2403 }
2404 mms.memory()->add_req(mms.memory2());
2405 }
2406
2407 if (_first_return) {
2408 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2409 _first_return = false;
2410 } else {
2411 _exits.map()->merge_replaced_nodes_with(map());
2412 }
2413
2414 stop_and_kill_map(); // This CFG path dies here
2415 }
2416
2417
2418 //------------------------------add_safepoint----------------------------------
2419 void Parse::add_safepoint() {
2420 uint parms = TypeFunc::Parms+1;
2421
2422 // Clear out dead values from the debug info.
2423 kill_dead_locals();
2424
2425 // Clone the JVM State
2426 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
|