12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compileLog.hpp"
26 #include "interpreter/linkResolver.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/method.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/c2compiler.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/idealGraphPrinter.hpp"
33 #include "opto/locknode.hpp"
34 #include "opto/memnode.hpp"
35 #include "opto/opaquenode.hpp"
36 #include "opto/parse.hpp"
37 #include "opto/rootnode.hpp"
38 #include "opto/runtime.hpp"
39 #include "opto/type.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/safepointMechanism.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "utilities/bitMap.inline.hpp"
44 #include "utilities/copy.hpp"
45
46 // Static array so we can figure out which bytecodes stop us from compiling
47 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
48 // and eventually should be encapsulated in a proper class (gri 8/18/98).
49
50 #ifndef PRODUCT
51 uint nodes_created = 0;
52 uint methods_parsed = 0;
84 }
85 if (all_null_checks_found) {
86 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
87 (100*implicit_null_checks)/all_null_checks_found);
88 }
89 if (SharedRuntime::_implicit_null_throws) {
90 tty->print_cr("%u implicit null exceptions at runtime",
91 SharedRuntime::_implicit_null_throws);
92 }
93
94 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
95 BytecodeParseHistogram::print();
96 }
97 }
98 #endif
99
100 //------------------------------ON STACK REPLACEMENT---------------------------
101
102 // Construct a node which can be used to get incoming state for
103 // on stack replacement.
104 Node *Parse::fetch_interpreter_state(int index,
105 BasicType bt,
106 Node *local_addrs,
107 Node *local_addrs_base) {
108 Node *mem = memory(Compile::AliasIdxRaw);
109 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
110 Node *ctl = control();
111
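  // Interpreter slots are laid out in reverse order in the OSR buffer, so
  // slot 'index' is found at a negative offset (-index*wordSize) from
  // 'local_addrs' relative to 'local_addrs_base'.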
112   // Very similar to LoadNode::make, except we handle unaligned longs and
113   // doubles on SPARC. Intel can handle them just fine directly.
114 Node *l = nullptr;
115 switch (bt) { // Signature is flattened
116 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
117 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
118 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
119 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
120 case T_LONG:
121 case T_DOUBLE: {
122 // Since arguments are in reverse order, the argument address 'adr'
123 // refers to the back half of the long/double. Recompute adr.
124 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
125 if (Matcher::misaligned_doubles_ok) {
126 l = (bt == T_DOUBLE)
127 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
128 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
129 } else {
130 l = (bt == T_DOUBLE)
131 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
132 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
133 }
134 break;
135 }
136 default: ShouldNotReachHere();
137 }
138 return _gvn.transform(l);
139 }
140
141 // Helper routine to prevent the interpreter from handing
142 // unexpected typestate to an OSR method.
143 // The Node l is a value newly dug out of the interpreter frame.
144 // The type is the type predicted by ciTypeFlow. Note that it is
145 // not a general type, but can only come from Type::get_typeflow_type.
146 // The safepoint is a map which will feed an uncommon trap.
147 Node* Parse::check_interpreter_type(Node* l, const Type* type,
148 SafePointNode* &bad_type_exit) {
149
150 const TypeOopPtr* tp = type->isa_oopptr();
151
152 // TypeFlow may assert null-ness if a type appears unloaded.
153 if (type == TypePtr::NULL_PTR ||
154 (tp != nullptr && !tp->is_loaded())) {
155 // Value must be null, not a real oop.
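    // Emit an explicit null test: if the value is null, continue on the taken
    // branch; otherwise divert control to bad_type_exit, which the caller
    // later turns into an uncommon trap.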
156 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
157 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
158 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
159 set_control(_gvn.transform( new IfTrueNode(iff) ));
160 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
161 bad_type_exit->control()->add_req(bad_type);
162 l = null();
163 }
164
165 // Typeflow can also cut off paths from the CFG, based on
166 // types which appear unloaded, or call sites which appear unlinked.
167 // When paths are cut off, values at later merge points can rise
168 // toward more specific classes. Make sure these specific classes
169 // are still in effect.
170 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
171 // TypeFlow asserted a specific object type. Value must have that type.
172 Node* bad_type_ctrl = nullptr;
173 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
174 bad_type_exit->control()->add_req(bad_type_ctrl);
175 }
176
177 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
178 return l;
179 }
180
181 // Helper routine which sets up elements of the initial parser map when
182 // performing a parse for on stack replacement. Add values into map.
183 // The only parameter contains the address of the interpreter arguments.
184 void Parse::load_interpreter_state(Node* osr_buf) {
185 int index;
186 int max_locals = jvms()->loc_size();
187 int max_stack = jvms()->stk_size();
188
189
190 // Mismatch between method and jvms can occur since map briefly held
191 // an OSR entry state (which takes up one RawPtr word).
192 assert(max_locals == method()->max_locals(), "sanity");
193 assert(max_stack >= method()->max_stack(), "sanity");
194 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
195 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
196
197 // Find the start block.
198 Block* osr_block = start_block();
199 assert(osr_block->start() == osr_bci(), "sanity");
200
201 // Set initial BCI.
202 set_parse_bci(osr_block->start());
203
204 // Set initial stack depth.
205 set_sp(osr_block->start_sp());
206
207 // Check bailouts. We currently do not perform on stack replacement
208 // of loops in catch blocks or loops which branch with a non-empty stack.
209 if (sp() != 0) {
224 for (index = 0; index < mcnt; index++) {
225 // Make a BoxLockNode for the monitor.
226 BoxLockNode* osr_box = new BoxLockNode(next_monitor());
227 // Check for bailout after new BoxLockNode
228 if (failing()) { return; }
229
230     // This OSR locking region is unbalanced because it has no Lock node:
231     // the locking was done in the interpreter.
232     // This is similar to the Coarsened case, where the Lock node is eliminated
233     // and, as a result, the region is marked as Unbalanced.
234
235 // Emulate Coarsened state transition from Regular to Unbalanced.
236 osr_box->set_coarsened();
237 osr_box->set_unbalanced();
238
239 Node* box = _gvn.transform(osr_box);
240
241 // Displaced headers and locked objects are interleaved in the
242 // temp OSR buffer. We only copy the locked objects out here.
243 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
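      // Each monitor takes two slots in the buffer: the locked object at
      // slot index*2 and its displaced header at slot index*2 + 1, matching
      // the two fetch_interpreter_state calls below.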
244 Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
245 // Try and copy the displaced header to the BoxNode
246 Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
247
248
249 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
250
251 // Build a bogus FastLockNode (no code will be generated) and push the
252 // monitor into our debug info.
253 const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
254 map()->push_monitor(flock);
255
256 // If the lock is our method synchronization lock, tuck it away in
257 // _sync_lock for return and rethrow exit paths.
258 if (index == 0 && method()->is_synchronized()) {
259 _synch_lock = flock;
260 }
261 }
262
263 // Use the raw liveness computation to make sure that unexpected
264 // values don't propagate into the OSR frame.
265 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
266 if (!live_locals.is_valid()) {
267 // Degenerate or breakpointed method.
295 if (C->log() != nullptr) {
296 C->log()->elem("OSR_mismatch local_index='%d'",index);
297 }
298 set_local(index, null());
299 // and ignore it for the loads
300 continue;
301 }
302 }
303
304 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
305 if (type == Type::TOP || type == Type::HALF) {
306 continue;
307 }
308 // If the type falls to bottom, then this must be a local that
309 // is mixing ints and oops or some such. Forcing it to top
310 // makes it go dead.
311 if (type == Type::BOTTOM) {
312 continue;
313 }
314 // Construct code to access the appropriate local.
315 BasicType bt = type->basic_type();
316 if (type == TypePtr::NULL_PTR) {
317 // Ptr types are mixed together with T_ADDRESS but null is
318 // really for T_OBJECT types so correct it.
319 bt = T_OBJECT;
320 }
321 Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
322 set_local(index, value);
323 }
324
325 // Extract the needed stack entries from the interpreter frame.
326 for (index = 0; index < sp(); index++) {
327 const Type *type = osr_block->stack_type_at(index);
328 if (type != Type::TOP) {
329 // Currently the compiler bails out when attempting to on stack replace
330 // at a bci with a non-empty stack. We should not reach here.
331 ShouldNotReachHere();
332 }
333 }
334
335 // End the OSR migration
336 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
337 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
338 "OSR_migration_end", TypeRawPtr::BOTTOM,
339 osr_buf);
340
341 // Now that the interpreter state is loaded, make sure it will match
352 if (type->isa_oopptr() != nullptr) {
353 if (!live_oops.at(index)) {
354 // skip type check for dead oops
355 continue;
356 }
357 }
358 if (osr_block->flow()->local_type_at(index)->is_return_address()) {
359 // In our current system it's illegal for jsr addresses to be
360 // live into an OSR entry point because the compiler performs
361     // inlining of jsrs. ciTypeFlow has a bailout that detects this
362 // case and aborts the compile if addresses are live into an OSR
363 // entry point. Because of that we can assume that any address
364 // locals at the OSR entry point are dead. Method liveness
365 // isn't precise enough to figure out that they are dead in all
366     // cases, so simply skip checking address locals altogether.
367     // Any type check is guaranteed to fail since the
368 // interpreter type is the result of a load which might have any
369 // value and the expected type is a constant.
370 continue;
371 }
372 set_local(index, check_interpreter_type(l, type, bad_type_exit));
373 }
374
375 for (index = 0; index < sp(); index++) {
376 if (stopped()) break;
377 Node* l = stack(index);
378 if (l->is_top()) continue; // nothing here
379 const Type *type = osr_block->stack_type_at(index);
380 set_stack(index, check_interpreter_type(l, type, bad_type_exit));
381 }
382
383 if (bad_type_exit->control()->req() > 1) {
384 // Build an uncommon trap here, if any inputs can be unexpected.
385 bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
386 record_for_igvn(bad_type_exit->control());
387 SafePointNode* types_are_good = map();
388 set_map(bad_type_exit);
389 // The unexpected type happens because a new edge is active
390 // in the CFG, which typeflow had previously ignored.
391 // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
392 // This x will be typed as Integer if notReached is not yet linked.
393 // It could also happen due to a problem in ciTypeFlow analysis.
394 uncommon_trap(Deoptimization::Reason_constraint,
395 Deoptimization::Action_reinterpret);
396 set_map(types_are_good);
397 }
398 }
399
400 //------------------------------Parse------------------------------------------
501 // either breakpoint setting or hotswapping of methods may
502 // cause deoptimization.
503 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
504 C->dependencies()->assert_evol_method(method());
505 }
506
507 NOT_PRODUCT(methods_seen++);
508
509 // Do some special top-level things.
510 if (depth() == 1 && C->is_osr_compilation()) {
511 _tf = C->tf(); // the OSR entry type is different
512 _entry_bci = C->entry_bci();
513 _flow = method()->get_osr_flow_analysis(osr_bci());
514 } else {
515 _tf = TypeFunc::make(method());
516 _entry_bci = InvocationEntryBci;
517 _flow = method()->get_flow_analysis();
518 }
519
520 if (_flow->failing()) {
521 assert(false, "type flow analysis failed during parsing");
522 C->record_method_not_compilable(_flow->failure_reason());
523 #ifndef PRODUCT
524 if (PrintOpto && (Verbose || WizardMode)) {
525 if (is_osr_parse()) {
526 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
527 } else {
528 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
529 }
530 if (Verbose) {
531 method()->print();
532 method()->print_codes();
533 _flow->print();
534 }
535 }
536 #endif
537 }
538
539 #ifdef ASSERT
540 if (depth() == 1) {
541 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
592 load_interpreter_state(osr_buf);
593 } else {
594 set_map(entry_map);
595 do_method_entry();
596 }
597
598 if (depth() == 1 && !failing()) {
599 if (C->clinit_barrier_on_entry()) {
600 // Add check to deoptimize the nmethod once the holder class is fully initialized
601 clinit_deopt();
602 }
603 }
604
605 // Check for bailouts during method entry.
606 if (failing()) {
607 if (log) log->done("parse");
608 C->set_default_node_notes(caller_nn);
609 return;
610 }
611
612 entry_map = map(); // capture any changes performed by method setup code
613 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
614
615 // We begin parsing as if we have just encountered a jump to the
616 // method entry.
617 Block* entry_block = start_block();
618 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
619 set_map_clone(entry_map);
620 merge_common(entry_block, entry_block->next_path_num());
621
622 #ifndef PRODUCT
623 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
624 set_parse_histogram( parse_histogram_obj );
625 #endif
626
627 // Parse all the basic blocks.
628 do_all_blocks();
629
630 // Check for bailouts during conversion to graph
631 if (failing()) {
777 void Parse::build_exits() {
778 // make a clone of caller to prevent sharing of side-effects
779 _exits.set_map(_exits.clone_map());
780 _exits.clean_stack(_exits.sp());
781 _exits.sync_jvms();
782
783 RegionNode* region = new RegionNode(1);
784 record_for_igvn(region);
785 gvn().set_type_bottom(region);
786 _exits.set_control(region);
787
788 // Note: iophi and memphi are not transformed until do_exits.
789 Node* iophi = new PhiNode(region, Type::ABIO);
790 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
791 gvn().set_type_bottom(iophi);
792 gvn().set_type_bottom(memphi);
793 _exits.set_i_o(iophi);
794 _exits.set_all_memory(memphi);
795
796 // Add a return value to the exit state. (Do not push it yet.)
797 if (tf()->range()->cnt() > TypeFunc::Parms) {
798 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
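    // Sub-word integral returns are widened to int for the exit phi;
    // do_exits() later masks the merged value back to the declared width
    // via mask_int_value().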
799 if (ret_type->isa_int()) {
800 BasicType ret_bt = method()->return_type()->basic_type();
801 if (ret_bt == T_BOOLEAN ||
802 ret_bt == T_CHAR ||
803 ret_bt == T_BYTE ||
804 ret_bt == T_SHORT) {
805 ret_type = TypeInt::INT;
806 }
807 }
808
809 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
810 // becomes loaded during the subsequent parsing, the loaded and unloaded
811 // types will not join when we transform and push in do_exits().
812 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
813 if (ret_oop_type && !ret_oop_type->is_loaded()) {
814 ret_type = TypeOopPtr::BOTTOM;
815 }
816 int ret_size = type2size[ret_type->basic_type()];
817 Node* ret_phi = new PhiNode(region, ret_type);
818 gvn().set_type_bottom(ret_phi);
819 _exits.ensure_stack(ret_size);
820 assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
821 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
822 _exits.set_argument(0, ret_phi); // here is where the parser finds it
823 // Note: ret_phi is not yet pushed, until do_exits.
824 }
825 }
826
827
828 //----------------------------build_start_state-------------------------------
829 // Construct a state which contains only the incoming arguments from an
830 // unknown caller. The method & bci will be null & InvocationEntryBci.
831 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
832 int arg_size = tf->domain()->cnt();
833 int max_size = MAX2(arg_size, (int)tf->range()->cnt());
834 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
835 SafePointNode* map = new SafePointNode(max_size, jvms);
836 record_for_igvn(map);
837 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
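  // For an OSR compilation the only incoming argument is the raw pointer to
  // the OSR buffer (see Parse::create_entry_map), hence the single extra Parm.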
838 Node_Notes* old_nn = default_node_notes();
839 if (old_nn != nullptr && has_method()) {
840 Node_Notes* entry_nn = old_nn->clone(this);
841 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
842 entry_jvms->set_offsets(0);
843 entry_jvms->set_bci(entry_bci());
844 entry_nn->set_jvms(entry_jvms);
845 set_default_node_notes(entry_nn);
846 }
847 uint i;
848 for (i = 0; i < (uint)arg_size; i++) {
849 Node* parm = initial_gvn()->transform(new ParmNode(start, i));
850 map->init_req(i, parm);
851 // Record all these guys for later GVN.
852 record_for_igvn(parm);
853 }
854 for (; i < map->req(); i++) {
855 map->init_req(i, top());
856 }
857 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
858 set_default_node_notes(old_nn);
859 jvms->set_map(map);
860 return jvms;
861 }
862
863 //-----------------------------make_node_notes---------------------------------
864 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
865 if (caller_nn == nullptr) return nullptr;
866 Node_Notes* nn = caller_nn->clone(C);
867 JVMState* caller_jvms = nn->jvms();
868 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
869 jvms->set_offsets(0);
870 jvms->set_bci(_entry_bci);
871 nn->set_jvms(jvms);
872 return nn;
873 }
874
875
876 //--------------------------return_values--------------------------------------
877 void Compile::return_values(JVMState* jvms) {
878 GraphKit kit(jvms);
879 Node* ret = new ReturnNode(TypeFunc::Parms,
880 kit.control(),
881 kit.i_o(),
882 kit.reset_memory(),
883 kit.frameptr(),
884 kit.returnadr());
885   // Add zero or one return value
886 int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
887 if (ret_size > 0) {
888 kit.inc_sp(-ret_size); // pop the return value(s)
889 kit.sync_jvms();
890 ret->add_req(kit.argument(0));
891 // Note: The second dummy edge is not needed by a ReturnNode.
892 }
893 // bind it to root
894 root()->add_req(ret);
895 record_for_igvn(ret);
896 initial_gvn()->transform(ret);
897 }
898
899 //------------------------rethrow_exceptions-----------------------------------
900 // Bind all exception states in the list into a single RethrowNode.
901 void Compile::rethrow_exceptions(JVMState* jvms) {
902 GraphKit kit(jvms);
903 if (!kit.has_exceptions()) return; // nothing to generate
904 // Load my combined exception state into the kit, with all phis transformed:
905 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
906 Node* ex_oop = kit.use_exception_state(ex_map);
907 RethrowNode* exit = new RethrowNode(kit.control(),
908 kit.i_o(), kit.reset_memory(),
909 kit.frameptr(), kit.returnadr(),
910 // like a return but with exception input
911 ex_oop);
995 // to complete, we force all writes to complete.
996 //
997 // 2. Experimental VM option is used to force the barrier if any field
998 // was written out in the constructor.
999 //
1000 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1001 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1002 // MemBarVolatile is used before volatile load instead of after volatile
1003 // store, so there's no barrier after the store.
1004 // We want to guarantee the same behavior as on platforms with total store
1005 // order, although this is not required by the Java memory model.
1006 // In this case, we want to enforce visibility of volatile field
1007 // initializations which are performed in constructors.
1008 // So as with finals, we add a barrier here.
1009 //
1010 // "All bets are off" unless the first publication occurs after a
1011 // normal return from the constructor. We do not attempt to detect
1012 // such unusual early publications. But no barrier is needed on
1013 // exceptional returns, since they cannot publish normally.
1014 //
1015 if (method()->is_object_initializer() &&
1016 (wrote_final() || wrote_stable() ||
1017 (AlwaysSafeConstructors && wrote_fields()) ||
1018 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1019 Node* recorded_alloc = alloc_with_final_or_stable();
1020 _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1021 recorded_alloc);
1022
1023 // If Memory barrier is created for final fields write
1024 // and allocation node does not escape the initialize method,
1025 // then barrier introduced by allocation node can be removed.
1026 if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1027 AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1028 alloc->compute_MemBar_redundancy(method());
1029 }
1030 if (PrintOpto && (Verbose || WizardMode)) {
1031 method()->print_name();
1032 tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1033 }
1034 }
1035
1036 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1037 // transform each slice of the original memphi:
1038 mms.set_memory(_gvn.transform(mms.memory()));
1039 }
1040 // Clean up input MergeMems created by transforming the slices
1041 _gvn.transform(_exits.merged_memory());
1042
1043 if (tf()->range()->cnt() > TypeFunc::Parms) {
1044 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1045 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1046 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1047 // If the type we set for the ret_phi in build_exits() is too optimistic and
1048 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1049 // loading. It could also be due to an error, so mark this method as not compilable because
1050 // otherwise this could lead to an infinite compile loop.
1051 // In any case, this code path is rarely (and never in my testing) reached.
1052 C->record_method_not_compilable("Can't determine return type.");
1053 return;
1054 }
1055 if (ret_type->isa_int()) {
1056 BasicType ret_bt = method()->return_type()->basic_type();
1057 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1058 }
1059 _exits.push_node(ret_type->basic_type(), ret_phi);
1060 }
1061
1062 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1063
1064 // Unlock along the exceptional paths.
1118
1119 //-----------------------------create_entry_map-------------------------------
1120 // Initialize our parser map to contain the types at method entry.
1121 // For OSR, the map contains a single RawPtr parameter.
1122 // Initial monitor locking for sync. methods is performed by do_method_entry.
1123 SafePointNode* Parse::create_entry_map() {
1124 // Check for really stupid bail-out cases.
1125 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1126 if (len >= 32760) {
1127 // Bailout expected, this is a very rare edge case.
1128 C->record_method_not_compilable("too many local variables");
1129 return nullptr;
1130 }
1131
1132 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1133 _caller->map()->delete_replaced_nodes();
1134
1135 // If this is an inlined method, we may have to do a receiver null check.
1136 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1137 GraphKit kit(_caller);
1138 kit.null_check_receiver_before_call(method());
1139 _caller = kit.transfer_exceptions_into_jvms();
1140 if (kit.stopped()) {
1141 _exits.add_exception_states_from(_caller);
1142 _exits.set_jvms(_caller);
1143 return nullptr;
1144 }
1145 }
1146
1147 assert(method() != nullptr, "parser must have a method");
1148
1149 // Create an initial safepoint to hold JVM state during parsing
1150 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1151 set_map(new SafePointNode(len, jvms));
1152 jvms->set_map(map());
1153 record_for_igvn(map());
1154 assert(jvms->endoff() == len, "correct jvms sizing");
1155
1156 SafePointNode* inmap = _caller->map();
1157 assert(inmap != nullptr, "must have inmap");
1158 // In case of null check on receiver above
1159 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1160
1161 uint i;
1162
1163 // Pass thru the predefined input parameters.
1164 for (i = 0; i < TypeFunc::Parms; i++) {
1165 map()->init_req(i, inmap->in(i));
1166 }
1167
1168 if (depth() == 1) {
1169 assert(map()->memory()->Opcode() == Op_Parm, "");
1170 // Insert the memory aliasing node
1171 set_all_memory(reset_memory());
1172 }
1173 assert(merged_memory(), "");
1174
1175 // Now add the locals which are initially bound to arguments:
1176 uint arg_size = tf()->domain()->cnt();
1177 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1178 for (i = TypeFunc::Parms; i < arg_size; i++) {
1179 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1180 }
1181
1182 // Clear out the rest of the map (locals and stack)
1183 for (i = arg_size; i < len; i++) {
1184 map()->init_req(i, top());
1185 }
1186
1187 SafePointNode* entry_map = stop();
1188 return entry_map;
1189 }
1190
1191 //-----------------------------do_method_entry--------------------------------
1192 // Emit any code needed in the pseudo-block before BCI zero.
1193 // The main thing to do is lock the receiver of a synchronized method.
1194 void Parse::do_method_entry() {
1195 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1196 set_sp(0); // Java Stack Pointer
1230
1231 // If the method is synchronized, we need to construct a lock node, attach
1232 // it to the Start node, and pin it there.
1233 if (method()->is_synchronized()) {
1234 // Insert a FastLockNode right after the Start which takes as arguments
1235 // the current thread pointer, the "this" pointer & the address of the
1236 // stack slot pair used for the lock. The "this" pointer is a projection
1237 // off the start node, but the locking spot has to be constructed by
1238 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1239 // becomes the second argument to the FastLockNode call. The
1240 // FastLockNode becomes the new control parent to pin it to the start.
1241
1242 // Setup Object Pointer
1243 Node *lock_obj = nullptr;
1244 if (method()->is_static()) {
1245 ciInstance* mirror = _method->holder()->java_mirror();
1246 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1247 lock_obj = makecon(t_lock);
1248 } else { // Else pass the "this" pointer,
1249 lock_obj = local(0); // which is Parm0 from StartNode
1250 }
1251 // Clear out dead values from the debug info.
1252 kill_dead_locals();
1253 // Build the FastLockNode
1254 _synch_lock = shared_lock(lock_obj);
1255 // Check for bailout in shared_lock
1256 if (failing()) { return; }
1257 }
1258
1259 // Feed profiling data for parameters to the type system so it can
1260 // propagate it as speculative types
1261 record_profiled_parameters_for_speculation();
1262 }
1263
1264 //------------------------------init_blocks------------------------------------
1265 // Initialize our parser map to contain the types/monitors at method entry.
1266 void Parse::init_blocks() {
1267 // Create the blocks.
1268 _block_count = flow()->block_count();
1269 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1665 //--------------------handle_missing_successor---------------------------------
1666 void Parse::handle_missing_successor(int target_bci) {
1667 #ifndef PRODUCT
1668 Block* b = block();
1669 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1670 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1671 #endif
1672 ShouldNotReachHere();
1673 }
1674
1675 //--------------------------merge_common---------------------------------------
1676 void Parse::merge_common(Parse::Block* target, int pnum) {
1677 if (TraceOptoParse) {
1678 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1679 }
1680
1681 // Zap extra stack slots to top
1682 assert(sp() == target->start_sp(), "");
1683 clean_stack(sp());
1684
1685 if (!target->is_merged()) { // No prior mapping at this bci
1686 if (TraceOptoParse) { tty->print(" with empty state"); }
1687
1688 // If this path is dead, do not bother capturing it as a merge.
1689 // It is "as if" we had 1 fewer predecessors from the beginning.
1690 if (stopped()) {
1691 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1692 return;
1693 }
1694
1695 // Make a region if we know there are multiple or unpredictable inputs.
1696 // (Also, if this is a plain fall-through, we might see another region,
1697 // which must not be allowed into this block's map.)
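    // pnum is this path's input slot in the Region and its Phis: slot 0 is
    // reserved for the Region itself, and PhiNode::Input (slot 1) is the
    // first real path.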
1698 if (pnum > PhiNode::Input // Known multiple inputs.
1699 || target->is_handler() // These have unpredictable inputs.
1700 || target->is_loop_head() // Known multiple inputs
1701 || control()->is_Region()) { // We must hide this guy.
1702
1703 int current_bci = bci();
1704 set_parse_bci(target->start()); // Set target bci
1719 record_for_igvn(r);
1720 // zap all inputs to null for debugging (done in Node(uint) constructor)
1721 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1722 r->init_req(pnum, control());
1723 set_control(r);
1724 target->copy_irreducible_status_to(r, jvms());
1725 set_parse_bci(current_bci); // Restore bci
1726 }
1727
1728 // Convert the existing Parser mapping into a mapping at this bci.
1729 store_state_to(target);
1730 assert(target->is_merged(), "do not come here twice");
1731
1732 } else { // Prior mapping at this bci
1733 if (TraceOptoParse) { tty->print(" with previous state"); }
1734 #ifdef ASSERT
1735 if (target->is_SEL_head()) {
1736 target->mark_merged_backedge(block());
1737 }
1738 #endif
1739 // We must not manufacture more phis if the target is already parsed.
1740 bool nophi = target->is_parsed();
1741
1742 SafePointNode* newin = map();// Hang on to incoming mapping
1743 Block* save_block = block(); // Hang on to incoming block;
1744 load_state_from(target); // Get prior mapping
1745
1746 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1747 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1748 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1749 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1750
1751 // Iterate over my current mapping and the old mapping.
1752 // Where different, insert Phi functions.
1753 // Use any existing Phi functions.
1754 assert(control()->is_Region(), "must be merging to a region");
1755 RegionNode* r = control()->as_Region();
1756
1757 // Compute where to merge into
1758 // Merge incoming control path
1759 r->init_req(pnum, newin->control());
1760
1761 if (pnum == 1) { // Last merge for this Region?
1762 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1763 Node* result = _gvn.transform(r);
1764 if (r != result && TraceOptoParse) {
1765 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1766 }
1767 }
1768 record_for_igvn(r);
1769 }
1770
1771 // Update all the non-control inputs to map:
1772 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1773 bool check_elide_phi = target->is_SEL_backedge(save_block);
1774 for (uint j = 1; j < newin->req(); j++) {
1775 Node* m = map()->in(j); // Current state of target.
1776 Node* n = newin->in(j); // Incoming change to target state.
1777 PhiNode* phi;
1778 if (m->is_Phi() && m->as_Phi()->region() == r)
1779 phi = m->as_Phi();
1780 else
1781 phi = nullptr;
1782 if (m != n) { // Different; must merge
1783 switch (j) {
1784 // Frame pointer and Return Address never changes
1785 case TypeFunc::FramePtr:// Drop m, use the original value
1786 case TypeFunc::ReturnAdr:
1787 break;
1788 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1789 assert(phi == nullptr, "the merge contains phis, not vice versa");
1790 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1791 continue;
1792 default: // All normal stuff
1793 if (phi == nullptr) {
1794 const JVMState* jvms = map()->jvms();
1795 if (EliminateNestedLocks &&
1796 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1797           // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1798 // Use old BoxLock node as merged box.
1799 assert(newin->jvms()->is_monitor_box(j), "sanity");
1800 // This assert also tests that nodes are BoxLock.
1801 assert(BoxLockNode::same_slot(n, m), "sanity");
1808 // Incremental Inlining before EA and Macro nodes elimination.
1809 //
1810 // Incremental Inlining is executed after IGVN optimizations
1811 // during which BoxLock can be marked as Coarsened.
1812 old_box->set_coarsened(); // Verifies state
1813 old_box->set_unbalanced();
1814 }
1815 C->gvn_replace_by(n, m);
1816 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1817 phi = ensure_phi(j, nophi);
1818 }
1819 }
1820 break;
1821 }
1822 }
1823 // At this point, n might be top if:
1824 // - there is no phi (because TypeFlow detected a conflict), or
1825       // - the corresponding control edge is top (a dead incoming path)
1826 // It is a bug if we create a phi which sees a garbage value on a live path.
1827
1828 if (phi != nullptr) {
1829 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1830 assert(phi->region() == r, "");
1831 phi->set_req(pnum, n); // Then add 'n' to the merge
1832 if (pnum == PhiNode::Input) {
1833 // Last merge for this Phi.
1834 // So far, Phis have had a reasonable type from ciTypeFlow.
1835 // Now _gvn will join that with the meet of current inputs.
1836 // BOTTOM is never permissible here, 'cause pessimistically
1837 // Phis of pointers cannot lose the basic pointer type.
1838 DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
1839 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1840 map()->set_req(j, _gvn.transform(phi));
1841 DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
1842 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1843 record_for_igvn(phi);
1844 }
1845 }
1846 } // End of for all values to be merged
1847
1848 if (pnum == PhiNode::Input &&
1849 !r->in(0)) { // The occasional useless Region
1850 assert(control() == r, "");
1851 set_control(r->nonnull_req());
1852 }
1853
1854 map()->merge_replaced_nodes_with(newin);
1855
1856 // newin has been subsumed into the lazy merge, and is now dead.
1857 set_block(save_block);
1858
1859 stop(); // done with this guy, for now
1860 }
1861
1862 if (TraceOptoParse) {
1863 tty->print_cr(" on path %d", pnum);
1864 }
1865
1866 // Done with this parser state.
1867 assert(stopped(), "");
1868 }
1869
1981
1982 // Add new path to the region.
1983 uint pnum = r->req();
1984 r->add_req(nullptr);
1985
1986 for (uint i = 1; i < map->req(); i++) {
1987 Node* n = map->in(i);
1988 if (i == TypeFunc::Memory) {
1989 // Ensure a phi on all currently known memories.
1990 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1991 Node* phi = mms.memory();
1992 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
1993 assert(phi->req() == pnum, "must be same size as region");
1994 phi->add_req(nullptr);
1995 }
1996 }
1997 } else {
1998 if (n->is_Phi() && n->as_Phi()->region() == r) {
1999 assert(n->req() == pnum, "must be same size as region");
2000 n->add_req(nullptr);
2001 }
2002 }
2003 }
2004
2005 return pnum;
2006 }
2007
2008 //------------------------------ensure_phi-------------------------------------
2009 // Turn the idx'th entry of the current map into a Phi
2010 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2011 SafePointNode* map = this->map();
2012 Node* region = map->control();
2013 assert(region->is_Region(), "");
2014
2015 Node* o = map->in(idx);
2016 assert(o != nullptr, "");
2017
2018 if (o == top()) return nullptr; // TOP always merges into TOP
2019
2020 if (o->is_Phi() && o->as_Phi()->region() == region) {
2021 return o->as_Phi();
2022 }
2023
2024 // Now use a Phi here for merging
2025 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2026 const JVMState* jvms = map->jvms();
2027 const Type* t = nullptr;
2028 if (jvms->is_loc(idx)) {
2029 t = block()->local_type_at(idx - jvms->locoff());
2030 } else if (jvms->is_stk(idx)) {
2031 t = block()->stack_type_at(idx - jvms->stkoff());
2032 } else if (jvms->is_mon(idx)) {
2033 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2034 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2035 } else if ((uint)idx < TypeFunc::Parms) {
2036 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2037 } else {
2038 assert(false, "no type information for this phi");
2039 }
2040
2041 // If the type falls to bottom, then this must be a local that
2042 // is mixing ints and oops or some such. Forcing it to top
2043 // makes it go dead.
2044 if (t == Type::BOTTOM) {
2045 map->set_req(idx, top());
2046 return nullptr;
2047 }
2048
2049 // Do not create phis for top either.
2050   // A top on a non-null control flow must be unused even after the phi.
2051 if (t == Type::TOP || t == Type::HALF) {
2052 map->set_req(idx, top());
2053 return nullptr;
2054 }
2055
2056 PhiNode* phi = PhiNode::make(region, o, t);
2057 gvn().set_type(phi, t);
2058 if (C->do_escape_analysis()) record_for_igvn(phi);
2059 map->set_req(idx, phi);
2060 return phi;
2061 }
2062
2063 //--------------------------ensure_memory_phi----------------------------------
2064 // Turn the idx'th slice of the current memory into a Phi
2065 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2066 MergeMemNode* mem = merged_memory();
2067 Node* region = control();
2068 assert(region->is_Region(), "");
2069
2070 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2071 assert(o != nullptr && o != top(), "");
2072
2073 PhiNode* phi;
2074 if (o->is_Phi() && o->as_Phi()->region() == region) {
2075 phi = o->as_Phi();
2076 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2077 // clone the shared base memory phi to make a new memory split
2078 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2079 const Type* t = phi->bottom_type();
2080 const TypePtr* adr_type = C->get_adr_type(idx);
2170 // Add check to deoptimize once holder klass is fully initialized.
2171 void Parse::clinit_deopt() {
2172 assert(C->has_method(), "only for normal compilations");
2173 assert(depth() == 1, "only for main compiled method");
2174 assert(is_normal_parse(), "no barrier needed on osr entry");
2175 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2176
2177 set_parse_bci(0);
2178
2179 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2180 guard_klass_being_initialized(holder);
2181 }
2182
2183 //------------------------------return_current---------------------------------
2184 // Append current _map to _exit_return
2185 void Parse::return_current(Node* value) {
2186 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2187 call_register_finalizer();
2188 }
2189
2190 // Do not set_parse_bci, so that return goo is credited to the return insn.
2191 set_bci(InvocationEntryBci);
2192 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2193 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2194 }
2195 if (C->env()->dtrace_method_probes()) {
2196 make_dtrace_method_exit(method());
2197 }
2198 SafePointNode* exit_return = _exits.map();
2199 exit_return->in( TypeFunc::Control )->add_req( control() );
2200 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2201 Node *mem = exit_return->in( TypeFunc::Memory );
2202 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2203 if (mms.is_empty()) {
2204 // get a copy of the base memory, and patch just this one input
2205 const TypePtr* adr_type = mms.adr_type(C);
2206 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2207 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2208 gvn().set_type_bottom(phi);
2209 phi->del_req(phi->req()-1); // prepare to re-patch
2210 mms.set_memory(phi);
2211 }
2212 mms.memory()->add_req(mms.memory2());
2213 }
2214
2215 // frame pointer is always same, already captured
2216 if (value != nullptr) {
2217 // If returning oops to an interface-return, there is a silent free
2218 // cast from oop to interface allowed by the Verifier. Make it explicit
2219 // here.
2220 Node* phi = _exits.argument(0);
2221 phi->add_req(value);
2222 }
2223
2224 if (_first_return) {
2225 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2226 _first_return = false;
2227 } else {
2228 _exits.map()->merge_replaced_nodes_with(map());
2229 }
2230
2231 stop_and_kill_map(); // This CFG path dies here
2232 }
2233
2234
2235 //------------------------------add_safepoint----------------------------------
2236 void Parse::add_safepoint() {
2237 uint parms = TypeFunc::Parms+1;
2238
2239 // Clear out dead values from the debug info.
2240 kill_dead_locals();
2241
2242 // Clone the JVM State
2243 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compileLog.hpp"
26 #include "interpreter/linkResolver.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/method.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/c2compiler.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/convertnode.hpp"
33 #include "opto/idealGraphPrinter.hpp"
34 #include "opto/inlinetypenode.hpp"
35 #include "opto/locknode.hpp"
36 #include "opto/memnode.hpp"
37 #include "opto/opaquenode.hpp"
38 #include "opto/parse.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/runtime.hpp"
41 #include "opto/type.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "runtime/safepointMechanism.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "utilities/bitMap.inline.hpp"
46 #include "utilities/copy.hpp"
47
48 // Static array so we can figure out which bytecodes stop us from compiling
49 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
50 // and eventually should be encapsulated in a proper class (gri 8/18/98).
51
52 #ifndef PRODUCT
53 uint nodes_created = 0;
54 uint methods_parsed = 0;
86 }
87 if (all_null_checks_found) {
88 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
89 (100*implicit_null_checks)/all_null_checks_found);
90 }
91 if (SharedRuntime::_implicit_null_throws) {
92 tty->print_cr("%u implicit null exceptions at runtime",
93 SharedRuntime::_implicit_null_throws);
94 }
95
96 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
97 BytecodeParseHistogram::print();
98 }
99 }
100 #endif
101
102 //------------------------------ON STACK REPLACEMENT---------------------------
103
104 // Construct a node which can be used to get incoming state for
105 // on stack replacement.
106 Node* Parse::fetch_interpreter_state(int index,
107 const Type* type,
108 Node* local_addrs,
109 Node* local_addrs_base) {
110 BasicType bt = type->basic_type();
111 if (type == TypePtr::NULL_PTR) {
112 // Ptr types are mixed together with T_ADDRESS but nullptr is
113 // really for T_OBJECT types so correct it.
114 bt = T_OBJECT;
115 }
116 Node *mem = memory(Compile::AliasIdxRaw);
117 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
118 Node *ctl = control();
119
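  // Interpreter slots are laid out in reverse order in the OSR buffer, so
  // slot 'index' is found at a negative offset (-index*wordSize) from
  // 'local_addrs' relative to 'local_addrs_base'.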
120   // Very similar to LoadNode::make, except we handle unaligned longs and
121   // doubles on SPARC. Intel can handle them just fine directly.
122 Node *l = nullptr;
123 switch (bt) { // Signature is flattened
124 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
125 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
126 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
127 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
128 case T_LONG:
129 case T_DOUBLE: {
130 // Since arguments are in reverse order, the argument address 'adr'
131 // refers to the back half of the long/double. Recompute adr.
132 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
133 if (Matcher::misaligned_doubles_ok) {
134 l = (bt == T_DOUBLE)
135 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
136 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
137 } else {
138 l = (bt == T_DOUBLE)
139 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
140 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
141 }
142 break;
143 }
144 default: ShouldNotReachHere();
145 }
146 return _gvn.transform(l);
147 }
148
149 // Helper routine to prevent the interpreter from handing
150 // unexpected typestate to an OSR method.
151 // The Node l is a value newly dug out of the interpreter frame.
152 // The type is the type predicted by ciTypeFlow. Note that it is
153 // not a general type, but can only come from Type::get_typeflow_type.
154 // The safepoint is a map which will feed an uncommon trap.
155 Node* Parse::check_interpreter_type(Node* l, const Type* type,
156 SafePointNode* &bad_type_exit, bool is_early_larval) {
157 const TypeOopPtr* tp = type->isa_oopptr();
158
159 // TypeFlow may assert null-ness if a type appears unloaded.
160 if (type == TypePtr::NULL_PTR ||
161 (tp != nullptr && !tp->is_loaded())) {
162 // Value must be null, not a real oop.
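    // Emit an explicit null test: if the value is null, continue on the taken
    // branch; otherwise divert control to bad_type_exit, which the caller
    // later turns into an uncommon trap.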
163 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
164 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
165 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
166 set_control(_gvn.transform( new IfTrueNode(iff) ));
167 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
168 bad_type_exit->control()->add_req(bad_type);
169 l = null();
170 }
171
172 // Typeflow can also cut off paths from the CFG, based on
173 // types which appear unloaded, or call sites which appear unlinked.
174 // When paths are cut off, values at later merge points can rise
175 // toward more specific classes. Make sure these specific classes
176 // are still in effect.
177 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
178 // TypeFlow asserted a specific object type. Value must have that type.
179 Node* bad_type_ctrl = nullptr;
180 if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
181 // Check inline types for null here to prevent checkcast from adding an
182 // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
183 l = null_check_oop(l, &bad_type_ctrl);
184 bad_type_exit->control()->add_req(bad_type_ctrl);
185 }
186
187 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl, false, is_early_larval);
188 bad_type_exit->control()->add_req(bad_type_ctrl);
189 }
190
191 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
192 return l;
193 }
194
195 // Helper routine which sets up elements of the initial parser map when
196 // performing a parse for on stack replacement. Add values into map.
197 // The only parameter contains the address of the interpreter arguments.
198 void Parse::load_interpreter_state(Node* osr_buf) {
199 int index;
200 int max_locals = jvms()->loc_size();
201 int max_stack = jvms()->stk_size();
202
203 // Mismatch between method and jvms can occur since map briefly held
204 // an OSR entry state (which takes up one RawPtr word).
205 assert(max_locals == method()->max_locals(), "sanity");
206 assert(max_stack >= method()->max_stack(), "sanity");
207 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
208 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
209
210 // Find the start block.
211 Block* osr_block = start_block();
212 assert(osr_block->start() == osr_bci(), "sanity");
213
214 // Set initial BCI.
215 set_parse_bci(osr_block->start());
216
217 // Set initial stack depth.
218 set_sp(osr_block->start_sp());
219
220 // Check bailouts. We currently do not perform on stack replacement
221 // of loops in catch blocks or loops which branch with a non-empty stack.
222 if (sp() != 0) {
237 for (index = 0; index < mcnt; index++) {
238 // Make a BoxLockNode for the monitor.
239 BoxLockNode* osr_box = new BoxLockNode(next_monitor());
240 // Check for bailout after new BoxLockNode
241 if (failing()) { return; }
242
243     // This OSR locking region is unbalanced because it has no Lock node:
244     // the locking was done in the interpreter.
245     // This is similar to the Coarsened case, where the Lock node is eliminated
246     // and, as a result, the region is marked as Unbalanced.
247
248 // Emulate Coarsened state transition from Regular to Unbalanced.
249 osr_box->set_coarsened();
250 osr_box->set_unbalanced();
251
252 Node* box = _gvn.transform(osr_box);
253
254 // Displaced headers and locked objects are interleaved in the
255 // temp OSR buffer. We only copy the locked objects out here.
256 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
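      // Each monitor takes two slots in the buffer: the locked object at
      // slot index*2 and its displaced header at slot index*2 + 1, matching
      // the two fetch_interpreter_state calls below.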
257 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
258 // Try and copy the displaced header to the BoxNode
259 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
260
261 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
262
263 // Build a bogus FastLockNode (no code will be generated) and push the
264 // monitor into our debug info.
265 const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
266 map()->push_monitor(flock);
267
268 // If the lock is our method synchronization lock, tuck it away in
269 // _sync_lock for return and rethrow exit paths.
270 if (index == 0 && method()->is_synchronized()) {
271 _synch_lock = flock;
272 }
273 }
274
275 // Use the raw liveness computation to make sure that unexpected
276 // values don't propagate into the OSR frame.
277 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
278 if (!live_locals.is_valid()) {
279 // Degenerate or breakpointed method.
307 if (C->log() != nullptr) {
308 C->log()->elem("OSR_mismatch local_index='%d'",index);
309 }
310 set_local(index, null());
311 // and ignore it for the loads
312 continue;
313 }
314 }
315
316 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
317 if (type == Type::TOP || type == Type::HALF) {
318 continue;
319 }
320 // If the type falls to bottom, then this must be a local that
321 // is mixing ints and oops or some such. Forcing it to top
322 // makes it go dead.
323 if (type == Type::BOTTOM) {
324 continue;
325 }
326 // Construct code to access the appropriate local.
327 Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
328 set_local(index, value);
329 }
330
331 // Extract the needed stack entries from the interpreter frame.
332 for (index = 0; index < sp(); index++) {
333 const Type *type = osr_block->stack_type_at(index);
334 if (type != Type::TOP) {
335 // Currently the compiler bails out when attempting to on stack replace
336 // at a bci with a non-empty stack. We should not reach here.
337 ShouldNotReachHere();
338 }
339 }
340
341 // End the OSR migration
342 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
343 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
344 "OSR_migration_end", TypeRawPtr::BOTTOM,
345 osr_buf);
346
347 // Now that the interpreter state is loaded, make sure it will match
358 if (type->isa_oopptr() != nullptr) {
359 if (!live_oops.at(index)) {
360 // skip type check for dead oops
361 continue;
362 }
363 }
364 if (osr_block->flow()->local_type_at(index)->is_return_address()) {
365 // In our current system it's illegal for jsr addresses to be
366 // live into an OSR entry point because the compiler performs
367     // inlining of jsrs. ciTypeFlow has a bailout that detects this
368 // case and aborts the compile if addresses are live into an OSR
369 // entry point. Because of that we can assume that any address
370 // locals at the OSR entry point are dead. Method liveness
371 // isn't precise enough to figure out that they are dead in all
372     // cases, so simply skip checking address locals altogether.
373     // Any type check is guaranteed to fail since the
374 // interpreter type is the result of a load which might have any
375 // value and the expected type is a constant.
376 continue;
377 }
378 bool is_early_larval = osr_block->flow()->local_type_at(index)->is_early_larval();
379 set_local(index, check_interpreter_type(l, type, bad_type_exit, is_early_larval));
380 }
381
382 for (index = 0; index < sp(); index++) {
383 if (stopped()) break;
384 Node* l = stack(index);
385 if (l->is_top()) continue; // nothing here
386 const Type *type = osr_block->stack_type_at(index);
387 bool is_early_larval = osr_block->flow()->stack_type_at(index)->is_early_larval();
388 set_stack(index, check_interpreter_type(l, type, bad_type_exit, is_early_larval));
389 }
390
391 if (bad_type_exit->control()->req() > 1) {
392 // Build an uncommon trap here, if any inputs can be unexpected.
393 bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
394 record_for_igvn(bad_type_exit->control());
395 SafePointNode* types_are_good = map();
396 set_map(bad_type_exit);
397 // The unexpected type happens because a new edge is active
398 // in the CFG, which typeflow had previously ignored.
399 // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
400 // This x will be typed as Integer if notReached is not yet linked.
401 // It could also happen due to a problem in ciTypeFlow analysis.
402 uncommon_trap(Deoptimization::Reason_constraint,
403 Deoptimization::Action_reinterpret);
404 set_map(types_are_good);
405 }
406 }
407
408 //------------------------------Parse------------------------------------------
509 // either breakpoint setting or hotswapping of methods may
510 // cause deoptimization.
511 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
512 C->dependencies()->assert_evol_method(method());
513 }
514
515 NOT_PRODUCT(methods_seen++);
516
517 // Do some special top-level things.
518 if (depth() == 1 && C->is_osr_compilation()) {
519 _tf = C->tf(); // the OSR entry type is different
520 _entry_bci = C->entry_bci();
521 _flow = method()->get_osr_flow_analysis(osr_bci());
522 } else {
523 _tf = TypeFunc::make(method());
524 _entry_bci = InvocationEntryBci;
525 _flow = method()->get_flow_analysis();
526 }
527
528 if (_flow->failing()) {
529 // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
530 // can lead to this. Re-enable once 8284443 is fixed.
531 //assert(false, "type flow analysis failed during parsing");
532 C->record_method_not_compilable(_flow->failure_reason());
533 #ifndef PRODUCT
534 if (PrintOpto && (Verbose || WizardMode)) {
535 if (is_osr_parse()) {
536 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
537 } else {
538 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
539 }
540 if (Verbose) {
541 method()->print();
542 method()->print_codes();
543 _flow->print();
544 }
545 }
546 #endif
547 }
548
549 #ifdef ASSERT
550 if (depth() == 1) {
551 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
602 load_interpreter_state(osr_buf);
603 } else {
604 set_map(entry_map);
605 do_method_entry();
606 }
607
608 if (depth() == 1 && !failing()) {
609 if (C->clinit_barrier_on_entry()) {
610 // Add check to deoptimize the nmethod once the holder class is fully initialized
611 clinit_deopt();
612 }
613 }
614
615 // Check for bailouts during method entry.
616 if (failing()) {
617 if (log) log->done("parse");
618 C->set_default_node_notes(caller_nn);
619 return;
620 }
621
622 // Handle inline type arguments
623 int arg_size = method()->arg_size();
624 for (int i = 0; i < arg_size; i++) {
625 Node* parm = local(i);
626 const Type* t = _gvn.type(parm);
627 if (t->is_inlinetypeptr()) {
628 // If the parameter is a value object, try to scalarize it if we know that it is unrestricted (not early larval)
629 // Parameters are non-larval except the receiver of a constructor, which must be an early larval object.
630 if (!(method()->is_object_constructor() && i == 0)) {
631 // Create InlineTypeNode from the oop and replace the parameter
632 Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass());
633 replace_in_map(parm, vt);
634 }
635 } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && method()->has_vararg() &&
636 t->isa_aryptr() != nullptr && !t->is_aryptr()->is_null_free() && !t->is_aryptr()->is_flat() &&
637 (!t->is_aryptr()->is_not_null_free() || !t->is_aryptr()->is_not_flat())) {
638 // Speculate on varargs Object array being not null-free and not flat
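      // The lines below start from the existing speculative array type if there is one
      // (otherwise the declared array type), strip its speculative part, mark it
      // not-null-free and not-flat, and install the result as the speculative part of a
      // CheckCastPP on the parameter so later array accesses can rely on the speculation.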
639 const TypePtr* spec_type = t->speculative();
640 spec_type = (spec_type != nullptr && spec_type->isa_aryptr() != nullptr) ? spec_type : t->is_aryptr();
641 spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free()->cast_to_not_flat();
642 spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
643 Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
644 replace_in_map(parm, cast);
645 }
646 }
647
648 entry_map = map(); // capture any changes performed by method setup code
649 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
650
651 // We begin parsing as if we have just encountered a jump to the
652 // method entry.
653 Block* entry_block = start_block();
654 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
655 set_map_clone(entry_map);
656 merge_common(entry_block, entry_block->next_path_num());
657
658 #ifndef PRODUCT
659 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
660 set_parse_histogram( parse_histogram_obj );
661 #endif
662
663 // Parse all the basic blocks.
664 do_all_blocks();
665
666 // Check for bailouts during conversion to graph
667 if (failing()) {
813 void Parse::build_exits() {
814 // make a clone of caller to prevent sharing of side-effects
815 _exits.set_map(_exits.clone_map());
816 _exits.clean_stack(_exits.sp());
817 _exits.sync_jvms();
818
819 RegionNode* region = new RegionNode(1);
820 record_for_igvn(region);
821 gvn().set_type_bottom(region);
822 _exits.set_control(region);
823
824 // Note: iophi and memphi are not transformed until do_exits.
825 Node* iophi = new PhiNode(region, Type::ABIO);
826 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
827 gvn().set_type_bottom(iophi);
828 gvn().set_type_bottom(memphi);
829 _exits.set_i_o(iophi);
830 _exits.set_all_memory(memphi);
831
832 // Add a return value to the exit state. (Do not push it yet.)
833 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
834 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
835 if (ret_type->isa_int()) {
836 BasicType ret_bt = method()->return_type()->basic_type();
837 if (ret_bt == T_BOOLEAN ||
838 ret_bt == T_CHAR ||
839 ret_bt == T_BYTE ||
840 ret_bt == T_SHORT) {
841 ret_type = TypeInt::INT;
842 }
843 }
844
845 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
846 // becomes loaded during the subsequent parsing, the loaded and unloaded
847 // types will not join when we transform and push in do_exits().
848 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
849 if (ret_oop_type && !ret_oop_type->is_loaded()) {
850 ret_type = TypeOopPtr::BOTTOM;
851 }
852 int ret_size = type2size[ret_type->basic_type()];
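    // type2size yields 1 here, or 2 for long/double returns, which occupy two stack slots.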
853 Node* ret_phi = new PhiNode(region, ret_type);
854 gvn().set_type_bottom(ret_phi);
855 _exits.ensure_stack(ret_size);
856 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
857 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
858 _exits.set_argument(0, ret_phi); // here is where the parser finds it
859 // Note: ret_phi is not yet pushed, until do_exits.
860 }
861 }
862
863 //----------------------------build_start_state-------------------------------
864 // Construct a state which contains only the incoming arguments from an
865 // unknown caller. The method & bci will be null & InvocationEntryBci.
866 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
867 int arg_size = tf->domain_sig()->cnt();
868 int max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
869 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
870 SafePointNode* map = new SafePointNode(max_size, jvms);
871 jvms->set_map(map);
872 record_for_igvn(map);
873 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
874 Node_Notes* old_nn = default_node_notes();
875 if (old_nn != nullptr && has_method()) {
876 Node_Notes* entry_nn = old_nn->clone(this);
877 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
878 entry_jvms->set_offsets(0);
879 entry_jvms->set_bci(entry_bci());
880 entry_nn->set_jvms(entry_jvms);
881 set_default_node_notes(entry_nn);
882 }
883 PhaseGVN& gvn = *initial_gvn();
884 uint i = 0;
885 int arg_num = 0;
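  // Note: i indexes the JVMS argument slots while j indexes the incoming Parm projections;
  // they advance at different rates because a scalarized inline type argument spans several
  // Parm projections rather than a single oop argument.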
886 for (uint j = 0; i < (uint)arg_size; i++) {
887 const Type* t = tf->domain_sig()->field_at(i);
888 Node* parm = nullptr;
889 if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
890 // Inline type arguments are not passed by reference: we get an argument per
891 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
892 GraphKit kit(jvms, &gvn);
893 kit.set_control(map->control());
894 Node* old_mem = map->memory();
895 // Use immutable memory for inline type loads and restore it below
896 kit.set_all_memory(C->immutable_memory());
897 parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
898 map->set_control(kit.control());
899 map->set_memory(old_mem);
900 } else {
901 parm = gvn.transform(new ParmNode(start, j++));
902 }
903 map->init_req(i, parm);
904 // Record all these guys for later GVN.
905 record_for_igvn(parm);
906 if (i >= TypeFunc::Parms && t != Type::HALF) {
907 arg_num++;
908 }
909 }
910 for (; i < map->req(); i++) {
911 map->init_req(i, top());
912 }
913 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
914 set_default_node_notes(old_nn);
915 return jvms;
916 }
917
918 //-----------------------------make_node_notes---------------------------------
919 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
920 if (caller_nn == nullptr) return nullptr;
921 Node_Notes* nn = caller_nn->clone(C);
922 JVMState* caller_jvms = nn->jvms();
923 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
924 jvms->set_offsets(0);
925 jvms->set_bci(_entry_bci);
926 nn->set_jvms(jvms);
927 return nn;
928 }
929
930
931 //--------------------------return_values--------------------------------------
932 void Compile::return_values(JVMState* jvms) {
933 GraphKit kit(jvms);
934 Node* ret = new ReturnNode(TypeFunc::Parms,
935 kit.control(),
936 kit.i_o(),
937 kit.reset_memory(),
938 kit.frameptr(),
939 kit.returnadr());
940   // Add zero or one return value
941 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
942 if (ret_size > 0) {
943 kit.inc_sp(-ret_size); // pop the return value(s)
944 kit.sync_jvms();
945 Node* res = kit.argument(0);
946 if (tf()->returns_inline_type_as_fields()) {
947 // Multiple return values (inline type fields): add as many edges
948 // to the Return node as returned values.
949 InlineTypeNode* vt = res->as_InlineType();
950 ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
951 if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
952 ret->init_req(TypeFunc::Parms, vt);
953 } else {
954 // Return the tagged klass pointer to signal scalarization to the caller
955 Node* tagged_klass = vt->tagged_klass(kit.gvn());
956 // Return null if the inline type is null (null marker field is not set)
957 Node* conv = kit.gvn().transform(new ConvI2LNode(vt->get_null_marker()));
958 Node* shl = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
959 Node* shr = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
960 tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
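          // The two shifts by 63 sign-extend the null marker (0 or 1) into an all-zeros or
          // all-ones mask, so the AndL above yields null (0) when the marker is clear and
          // leaves the tagged klass pointer intact otherwise.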
961 ret->init_req(TypeFunc::Parms, tagged_klass);
962 }
963 uint idx = TypeFunc::Parms + 1;
964 vt->pass_fields(&kit, ret, idx, false, false);
965 } else {
966 ret->add_req(res);
967 // Note: The second dummy edge is not needed by a ReturnNode.
968 }
969 }
970 // bind it to root
971 root()->add_req(ret);
972 record_for_igvn(ret);
973 initial_gvn()->transform(ret);
974 }
975
976 //------------------------rethrow_exceptions-----------------------------------
977 // Bind all exception states in the list into a single RethrowNode.
978 void Compile::rethrow_exceptions(JVMState* jvms) {
979 GraphKit kit(jvms);
980 if (!kit.has_exceptions()) return; // nothing to generate
981 // Load my combined exception state into the kit, with all phis transformed:
982 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
983 Node* ex_oop = kit.use_exception_state(ex_map);
984 RethrowNode* exit = new RethrowNode(kit.control(),
985 kit.i_o(), kit.reset_memory(),
986 kit.frameptr(), kit.returnadr(),
987 // like a return but with exception input
988 ex_oop);
1072 // to complete, we force all writes to complete.
1073 //
1074 // 2. Experimental VM option is used to force the barrier if any field
1075 // was written out in the constructor.
1076 //
1077 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1078 //     support_IRIW_for_not_multiple_copy_atomic_cpu selects that a
1079 //     MemBarVolatile is emitted before a volatile load instead of after a volatile
1080 //     store, so there is no barrier after the store.
1081 // We want to guarantee the same behavior as on platforms with total store
1082 // order, although this is not required by the Java memory model.
1083 // In this case, we want to enforce visibility of volatile field
1084 // initializations which are performed in constructors.
1085 // So as with finals, we add a barrier here.
1086 //
1087 // "All bets are off" unless the first publication occurs after a
1088 // normal return from the constructor. We do not attempt to detect
1089 // such unusual early publications. But no barrier is needed on
1090 // exceptional returns, since they cannot publish normally.
1091 //
1092 if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
1093 (wrote_final() || wrote_stable() ||
1094 (AlwaysSafeConstructors && wrote_fields()) ||
1095 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1096 Node* recorded_alloc = alloc_with_final_or_stable();
1097 _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1098 recorded_alloc);
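    // With UseStoreStoreForCtor the cheaper StoreStore barrier suffices: it only orders the
    // constructor's field stores before any later store that publishes the object, whereas
    // MemBarRelease additionally orders preceding loads.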
1099
1100     // If a memory barrier is created for final field writes
1101     // and the allocation node does not escape the initializer method,
1102     // then the barrier introduced by the allocation node can be removed.
1103 if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1104 AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1105 alloc->compute_MemBar_redundancy(method());
1106 }
1107 if (PrintOpto && (Verbose || WizardMode)) {
1108 method()->print_name();
1109 tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1110 }
1111 }
1112
1113 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1114 // transform each slice of the original memphi:
1115 mms.set_memory(_gvn.transform(mms.memory()));
1116 }
1117 // Clean up input MergeMems created by transforming the slices
1118 _gvn.transform(_exits.merged_memory());
1119
1120 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1121 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1122 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1123 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1124 // If the type we set for the ret_phi in build_exits() is too optimistic and
1125 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1126 // loading. It could also be due to an error, so mark this method as not compilable because
1127 // otherwise this could lead to an infinite compile loop.
1128 // In any case, this code path is rarely (and never in my testing) reached.
1129 C->record_method_not_compilable("Can't determine return type.");
1130 return;
1131 }
1132 if (ret_type->isa_int()) {
1133 BasicType ret_bt = method()->return_type()->basic_type();
1134 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1135 }
1136 _exits.push_node(ret_type->basic_type(), ret_phi);
1137 }
1138
1139 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1140
1141 // Unlock along the exceptional paths.
1195
1196 //-----------------------------create_entry_map-------------------------------
1197 // Initialize our parser map to contain the types at method entry.
1198 // For OSR, the map contains a single RawPtr parameter.
1199 // Initial monitor locking for sync. methods is performed by do_method_entry.
1200 SafePointNode* Parse::create_entry_map() {
1201 // Check for really stupid bail-out cases.
1202 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1203 if (len >= 32760) {
1204 // Bailout expected, this is a very rare edge case.
1205 C->record_method_not_compilable("too many local variables");
1206 return nullptr;
1207 }
1208
1209 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1210 _caller->map()->delete_replaced_nodes();
1211
1212 // If this is an inlined method, we may have to do a receiver null check.
1213 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1214 GraphKit kit(_caller);
1215 Node* receiver = kit.argument(0);
1216 Node* null_free = kit.null_check_receiver_before_call(method());
1217 _caller = kit.transfer_exceptions_into_jvms();
1218
1219 if (kit.stopped()) {
1220 _exits.add_exception_states_from(_caller);
1221 _exits.set_jvms(_caller);
1222 return nullptr;
1223 }
1224 }
1225
1226 assert(method() != nullptr, "parser must have a method");
1227
1228 // Create an initial safepoint to hold JVM state during parsing
1229 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1230 set_map(new SafePointNode(len, jvms));
1231 jvms->set_map(map());
1232 record_for_igvn(map());
1233 assert(jvms->endoff() == len, "correct jvms sizing");
1234
1235 SafePointNode* inmap = _caller->map();
1236 assert(inmap != nullptr, "must have inmap");
1237 // In case of null check on receiver above
1238 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1239
1240 uint i;
1241
1242 // Pass thru the predefined input parameters.
1243 for (i = 0; i < TypeFunc::Parms; i++) {
1244 map()->init_req(i, inmap->in(i));
1245 }
1246
1247 if (depth() == 1) {
1248 assert(map()->memory()->Opcode() == Op_Parm, "");
1249 // Insert the memory aliasing node
1250 set_all_memory(reset_memory());
1251 }
1252 assert(merged_memory(), "");
1253
1254 // Now add the locals which are initially bound to arguments:
1255 uint arg_size = tf()->domain_sig()->cnt();
1256 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1257 for (i = TypeFunc::Parms; i < arg_size; i++) {
1258 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1259 }
1260
1261 // Clear out the rest of the map (locals and stack)
1262 for (i = arg_size; i < len; i++) {
1263 map()->init_req(i, top());
1264 }
1265
1266 SafePointNode* entry_map = stop();
1267 return entry_map;
1268 }
1269
1270 //-----------------------------do_method_entry--------------------------------
1271 // Emit any code needed in the pseudo-block before BCI zero.
1272 // The main thing to do is lock the receiver of a synchronized method.
1273 void Parse::do_method_entry() {
1274 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1275 set_sp(0); // Java Stack Pointer
1309
1310 // If the method is synchronized, we need to construct a lock node, attach
1311 // it to the Start node, and pin it there.
1312 if (method()->is_synchronized()) {
1313 // Insert a FastLockNode right after the Start which takes as arguments
1314 // the current thread pointer, the "this" pointer & the address of the
1315 // stack slot pair used for the lock. The "this" pointer is a projection
1316 // off the start node, but the locking spot has to be constructed by
1317 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1318 // becomes the second argument to the FastLockNode call. The
1319 // FastLockNode becomes the new control parent to pin it to the start.
1320
1321 // Setup Object Pointer
1322 Node *lock_obj = nullptr;
1323 if (method()->is_static()) {
1324 ciInstance* mirror = _method->holder()->java_mirror();
1325 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1326 lock_obj = makecon(t_lock);
1327 } else { // Else pass the "this" pointer,
1328 lock_obj = local(0); // which is Parm0 from StartNode
1329 assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1330 }
1331 // Clear out dead values from the debug info.
1332 kill_dead_locals();
1333 // Build the FastLockNode
1334 _synch_lock = shared_lock(lock_obj);
1335 // Check for bailout in shared_lock
1336 if (failing()) { return; }
1337 }
1338
1339 // Feed profiling data for parameters to the type system so it can
1340 // propagate it as speculative types
1341 record_profiled_parameters_for_speculation();
1342 }
1343
1344 //------------------------------init_blocks------------------------------------
1345 // Initialize our parser map to contain the types/monitors at method entry.
1346 void Parse::init_blocks() {
1347 // Create the blocks.
1348 _block_count = flow()->block_count();
1349 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1745 //--------------------handle_missing_successor---------------------------------
1746 void Parse::handle_missing_successor(int target_bci) {
1747 #ifndef PRODUCT
1748 Block* b = block();
1749 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1750 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1751 #endif
1752 ShouldNotReachHere();
1753 }
1754
1755 //--------------------------merge_common---------------------------------------
1756 void Parse::merge_common(Parse::Block* target, int pnum) {
1757 if (TraceOptoParse) {
1758 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1759 }
1760
1761 // Zap extra stack slots to top
1762 assert(sp() == target->start_sp(), "");
1763 clean_stack(sp());
1764
1765 // Check for merge conflicts involving inline types
1766 JVMState* old_jvms = map()->jvms();
1767 int old_bci = bci();
1768 JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1769 tmp_jvms->set_should_reexecute(true);
1770 tmp_jvms->bind_map(map());
1771   // Execution needs to restart at the next bytecode (entry of the next
1772   // block)
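  // Binding this temporary re-execute JVMS to the map means that any buffering allocation
  // emitted during the merge below captures a state that deoptimizes to the entry of the
  // next block rather than into the middle of the previous one.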
1773 if (target->is_merged() ||
1774 pnum > PhiNode::Input ||
1775 target->is_handler() ||
1776 target->is_loop_head()) {
1777 set_parse_bci(target->start());
1778 for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1779 Node* n = map()->in(j); // Incoming change to target state.
1780 const Type* t = nullptr;
1781 if (tmp_jvms->is_loc(j)) {
1782 t = target->local_type_at(j - tmp_jvms->locoff());
1783 } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1784 t = target->stack_type_at(j - tmp_jvms->stkoff());
1785 }
1786 if (t != nullptr && t != Type::BOTTOM) {
1787 // An object can appear in the JVMS as either an oop or an InlineTypeNode. If the merge is
1788 // an InlineTypeNode, we need all the merge inputs to be InlineTypeNodes. Else, if the
1789         // merge is an oop, each merge input needs to be either an oop or a buffered
1790 // InlineTypeNode.
1791 if (!t->is_inlinetypeptr()) {
1792 // The merge cannot be an InlineTypeNode, ensure the input is buffered if it is an
1793 // InlineTypeNode
1794 if (n->is_InlineType()) {
1795 map()->set_req(j, n->as_InlineType()->buffer(this));
1796 }
1797 } else {
1798 // Since the merge is a value object, it can either be an oop or an InlineTypeNode
1799 if (!target->is_merged()) {
1800 // This is the first processed input of the merge. If it is an InlineTypeNode, the
1801 // merge will be an InlineTypeNode. Else, try to scalarize so the merge can be
1802 // scalarized as well. However, we cannot blindly scalarize an inline type oop here
1803 // since it may be larval
1804 if (!n->is_InlineType() && gvn().type(n)->is_zero_type()) {
1805 // Null constant implies that this is not a larval object
1806 map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
1807 }
1808 } else {
1809 Node* phi = target->start_map()->in(j);
1810 if (phi->is_InlineType()) {
1811 // Larval oops cannot be merged with non-larval ones, and since the merge point is
1812 // non-larval, n must be non-larval as well. As a result, we can scalarize n to merge
1813 // into phi
1814 if (!n->is_InlineType()) {
1815 map()->set_req(j, InlineTypeNode::make_from_oop(this, n, t->inline_klass()));
1816 }
1817 } else {
1818 // The merge is an oop phi, ensure the input is buffered if it is an InlineTypeNode
1819 if (n->is_InlineType()) {
1820 map()->set_req(j, n->as_InlineType()->buffer(this));
1821 }
1822 }
1823 }
1824 }
1825 }
1826 }
1827 }
1828 old_jvms->bind_map(map());
1829 set_parse_bci(old_bci);
1830
1831 if (!target->is_merged()) { // No prior mapping at this bci
1832 if (TraceOptoParse) { tty->print(" with empty state"); }
1833
1834 // If this path is dead, do not bother capturing it as a merge.
1835     // It is "as if" we had one fewer predecessor from the beginning.
1836 if (stopped()) {
1837 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1838 return;
1839 }
1840
1841 // Make a region if we know there are multiple or unpredictable inputs.
1842 // (Also, if this is a plain fall-through, we might see another region,
1843 // which must not be allowed into this block's map.)
1844 if (pnum > PhiNode::Input // Known multiple inputs.
1845 || target->is_handler() // These have unpredictable inputs.
1846 || target->is_loop_head() // Known multiple inputs
1847 || control()->is_Region()) { // We must hide this guy.
1848
1849 int current_bci = bci();
1850 set_parse_bci(target->start()); // Set target bci
1865 record_for_igvn(r);
1866 // zap all inputs to null for debugging (done in Node(uint) constructor)
1867 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1868 r->init_req(pnum, control());
1869 set_control(r);
1870 target->copy_irreducible_status_to(r, jvms());
1871 set_parse_bci(current_bci); // Restore bci
1872 }
1873
1874 // Convert the existing Parser mapping into a mapping at this bci.
1875 store_state_to(target);
1876 assert(target->is_merged(), "do not come here twice");
1877
1878 } else { // Prior mapping at this bci
1879 if (TraceOptoParse) { tty->print(" with previous state"); }
1880 #ifdef ASSERT
1881 if (target->is_SEL_head()) {
1882 target->mark_merged_backedge(block());
1883 }
1884 #endif
1885
1886 // We must not manufacture more phis if the target is already parsed.
1887 bool nophi = target->is_parsed();
1888
1889 SafePointNode* newin = map();// Hang on to incoming mapping
1890 Block* save_block = block(); // Hang on to incoming block;
1891 load_state_from(target); // Get prior mapping
1892
1893 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1894 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1895 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1896 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1897
1898 // Iterate over my current mapping and the old mapping.
1899 // Where different, insert Phi functions.
1900 // Use any existing Phi functions.
1901 assert(control()->is_Region(), "must be merging to a region");
1902 RegionNode* r = control()->as_Region();
1903
1904 // Compute where to merge into
1905 // Merge incoming control path
1906 r->init_req(pnum, newin->control());
1907
1908 if (pnum == 1) { // Last merge for this Region?
1909 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1910 Node* result = _gvn.transform(r);
1911 if (r != result && TraceOptoParse) {
1912 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1913 }
1914 }
1915 record_for_igvn(r);
1916 }
1917
1918 // Update all the non-control inputs to map:
1919 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1920 bool check_elide_phi = target->is_SEL_backedge(save_block);
1921 bool last_merge = (pnum == PhiNode::Input);
1922 for (uint j = 1; j < newin->req(); j++) {
1923 Node* m = map()->in(j); // Current state of target.
1924 Node* n = newin->in(j); // Incoming change to target state.
1925 Node* phi;
1926 if (m->is_Phi() && m->as_Phi()->region() == r) {
1927 phi = m;
1928 } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1929 phi = m;
1930 } else {
1931 phi = nullptr;
1932 }
1933 if (m != n) { // Different; must merge
1934 switch (j) {
1935         // Frame pointer and Return Address never change
1936 case TypeFunc::FramePtr:// Drop m, use the original value
1937 case TypeFunc::ReturnAdr:
1938 break;
1939 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1940 assert(phi == nullptr, "the merge contains phis, not vice versa");
1941 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1942 continue;
1943 default: // All normal stuff
1944 if (phi == nullptr) {
1945 const JVMState* jvms = map()->jvms();
1946 if (EliminateNestedLocks &&
1947 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1948             // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1949 // Use old BoxLock node as merged box.
1950 assert(newin->jvms()->is_monitor_box(j), "sanity");
1951 // This assert also tests that nodes are BoxLock.
1952 assert(BoxLockNode::same_slot(n, m), "sanity");
1959 // Incremental Inlining before EA and Macro nodes elimination.
1960 //
1961 // Incremental Inlining is executed after IGVN optimizations
1962 // during which BoxLock can be marked as Coarsened.
1963 old_box->set_coarsened(); // Verifies state
1964 old_box->set_unbalanced();
1965 }
1966 C->gvn_replace_by(n, m);
1967 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1968 phi = ensure_phi(j, nophi);
1969 }
1970 }
1971 break;
1972 }
1973 }
1974 // At this point, n might be top if:
1975 // - there is no phi (because TypeFlow detected a conflict), or
1976       // - the corresponding control edge is top (a dead incoming path)
1977 // It is a bug if we create a phi which sees a garbage value on a live path.
1978
1979 // Merging two inline types?
1980 if (phi != nullptr && phi->is_InlineType()) {
1981 // Reload current state because it may have been updated by ensure_phi
1982 assert(phi == map()->in(j), "unexpected value in map");
1983 assert(phi->as_InlineType()->has_phi_inputs(r), "");
1984 InlineTypeNode* vtm = phi->as_InlineType(); // Current inline type
1985 InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
1986 assert(vtm == phi, "Inline type should have Phi input");
1987
1988 #ifdef ASSERT
1989 if (TraceOptoParse) {
1990 tty->print_cr("\nMerging inline types");
1991 tty->print_cr("Current:");
1992 vtm->dump(2);
1993 tty->print_cr("Incoming:");
1994 vtn->dump(2);
1995 tty->cr();
1996 }
1997 #endif
1998 // Do the merge
1999 vtm->merge_with(&_gvn, vtn, pnum, last_merge);
2000 if (last_merge) {
2001 map()->set_req(j, _gvn.transform(vtm));
2002 record_for_igvn(vtm);
2003 }
2004 } else if (phi != nullptr) {
2005 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
2006 assert(phi->as_Phi()->region() == r, "");
2007 phi->set_req(pnum, n); // Then add 'n' to the merge
2008 if (last_merge) {
2009 // Last merge for this Phi.
2010 // So far, Phis have had a reasonable type from ciTypeFlow.
2011 // Now _gvn will join that with the meet of current inputs.
2012           // BOTTOM is never permissible here, because pessimistically
2013 // Phis of pointers cannot lose the basic pointer type.
2014 DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
2015 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
2016 map()->set_req(j, _gvn.transform(phi));
2017 DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
2018 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
2019 record_for_igvn(phi);
2020 }
2021 }
2022 } // End of for all values to be merged
2023
2024 if (last_merge && !r->in(0)) { // The occasional useless Region
2025 assert(control() == r, "");
2026 set_control(r->nonnull_req());
2027 }
2028
2029 map()->merge_replaced_nodes_with(newin);
2030
2031 // newin has been subsumed into the lazy merge, and is now dead.
2032 set_block(save_block);
2033
2034 stop(); // done with this guy, for now
2035 }
2036
2037 if (TraceOptoParse) {
2038 tty->print_cr(" on path %d", pnum);
2039 }
2040
2041 // Done with this parser state.
2042 assert(stopped(), "");
2043 }
2044
2156
2157 // Add new path to the region.
2158 uint pnum = r->req();
2159 r->add_req(nullptr);
2160
2161 for (uint i = 1; i < map->req(); i++) {
2162 Node* n = map->in(i);
2163 if (i == TypeFunc::Memory) {
2164 // Ensure a phi on all currently known memories.
2165 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2166 Node* phi = mms.memory();
2167 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2168 assert(phi->req() == pnum, "must be same size as region");
2169 phi->add_req(nullptr);
2170 }
2171 }
2172 } else {
2173 if (n->is_Phi() && n->as_Phi()->region() == r) {
2174 assert(n->req() == pnum, "must be same size as region");
2175 n->add_req(nullptr);
2176 } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2177 n->as_InlineType()->add_new_path(r);
2178 }
2179 }
2180 }
2181
2182 return pnum;
2183 }
2184
2185 //------------------------------ensure_phi-------------------------------------
2186 // Turn the idx'th entry of the current map into a Phi
2187 Node* Parse::ensure_phi(int idx, bool nocreate) {
2188 SafePointNode* map = this->map();
2189 Node* region = map->control();
2190 assert(region->is_Region(), "");
2191
2192 Node* o = map->in(idx);
2193 assert(o != nullptr, "");
2194
2195 if (o == top()) return nullptr; // TOP always merges into TOP
2196
2197 if (o->is_Phi() && o->as_Phi()->region() == region) {
2198 return o->as_Phi();
2199 }
2200 InlineTypeNode* vt = o->isa_InlineType();
2201 if (vt != nullptr && vt->has_phi_inputs(region)) {
2202 return vt;
2203 }
2204
2205 // Now use a Phi here for merging
2206 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2207 const JVMState* jvms = map->jvms();
2208 const Type* t = nullptr;
2209 if (jvms->is_loc(idx)) {
2210 t = block()->local_type_at(idx - jvms->locoff());
2211 } else if (jvms->is_stk(idx)) {
2212 t = block()->stack_type_at(idx - jvms->stkoff());
2213 } else if (jvms->is_mon(idx)) {
2214 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2215 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2216 } else if ((uint)idx < TypeFunc::Parms) {
2217 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2218 } else {
2219 assert(false, "no type information for this phi");
2220 }
2221
2222 // If the type falls to bottom, then this must be a local that
2223 // is already dead or is mixing ints and oops or some such.
2224 // Forcing it to top makes it go dead.
2225 if (t == Type::BOTTOM) {
2226 map->set_req(idx, top());
2227 return nullptr;
2228 }
2229
2230 // Do not create phis for top either.
2231   // A top on a non-null control-flow path must be an unused value, even after the phi.
2232 if (t == Type::TOP || t == Type::HALF) {
2233 map->set_req(idx, top());
2234 return nullptr;
2235 }
2236
2237 if (vt != nullptr && t->is_inlinetypeptr()) {
2238 // Inline types are merged by merging their field values.
2239 // Create a cloned InlineTypeNode with phi inputs that
2240 // represents the merged inline type and update the map.
2241 vt = vt->clone_with_phis(&_gvn, region);
2242 map->set_req(idx, vt);
2243 return vt;
2244 } else {
2245 PhiNode* phi = PhiNode::make(region, o, t);
2246 gvn().set_type(phi, t);
2247 if (C->do_escape_analysis()) record_for_igvn(phi);
2248 map->set_req(idx, phi);
2249 return phi;
2250 }
2251 }
2252
2253 //--------------------------ensure_memory_phi----------------------------------
2254 // Turn the idx'th slice of the current memory into a Phi
2255 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2256 MergeMemNode* mem = merged_memory();
2257 Node* region = control();
2258 assert(region->is_Region(), "");
2259
2260 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2261 assert(o != nullptr && o != top(), "");
2262
2263 PhiNode* phi;
2264 if (o->is_Phi() && o->as_Phi()->region() == region) {
2265 phi = o->as_Phi();
2266 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2267 // clone the shared base memory phi to make a new memory split
2268 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2269 const Type* t = phi->bottom_type();
2270 const TypePtr* adr_type = C->get_adr_type(idx);
2360 // Add check to deoptimize once holder klass is fully initialized.
2361 void Parse::clinit_deopt() {
2362 assert(C->has_method(), "only for normal compilations");
2363 assert(depth() == 1, "only for main compiled method");
2364 assert(is_normal_parse(), "no barrier needed on osr entry");
2365 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2366
2367 set_parse_bci(0);
2368
2369 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2370 guard_klass_being_initialized(holder);
2371 }
2372
2373 //------------------------------return_current---------------------------------
2374 // Append current _map to _exit_return
2375 void Parse::return_current(Node* value) {
2376 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2377 call_register_finalizer();
2378 }
2379
2380 // frame pointer is always same, already captured
2381 if (value != nullptr) {
2382 Node* phi = _exits.argument(0);
2383 const Type* return_type = phi->bottom_type();
2384 const TypeInstPtr* tr = return_type->isa_instptr();
2385 if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2386 return_type->is_inlinetypeptr()) {
2387 // Inline type is returned as fields, make sure it is scalarized
2388 if (!value->is_InlineType()) {
2389 value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass());
2390 }
2391 if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2392 // Returning from root or an incrementally inlined method. Make sure all non-flat
2393 // fields are buffered and re-execute if allocation triggers deoptimization.
2394 PreserveReexecuteState preexecs(this);
2395 assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2396 jvms()->set_should_reexecute(true);
2397 inc_sp(1);
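        // The sp adjustment keeps the return value visible on the re-execution stack, so a
        // deoptimization triggered while allocating the fields re-runs the return bytecode
        // with its operand in place.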
2398 value = value->as_InlineType()->allocate_fields(this);
2399 }
2400 } else if (value->is_InlineType()) {
2401 // Inline type is returned as oop, make sure it is buffered and re-execute
2402 // if allocation triggers deoptimization.
2403 PreserveReexecuteState preexecs(this);
2404 jvms()->set_should_reexecute(true);
2405 inc_sp(1);
2406 value = value->as_InlineType()->buffer(this);
2407 }
2408 // ...else
2409 // If returning oops to an interface-return, there is a silent free
2410 // cast from oop to interface allowed by the Verifier. Make it explicit here.
2411 phi->add_req(value);
2412 }
2413
2414 // Do not set_parse_bci, so that return goo is credited to the return insn.
2415 set_bci(InvocationEntryBci);
2416 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2417 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2418 }
2419 if (C->env()->dtrace_method_probes()) {
2420 make_dtrace_method_exit(method());
2421 }
2422
2423 SafePointNode* exit_return = _exits.map();
2424 exit_return->in( TypeFunc::Control )->add_req( control() );
2425 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2426 Node *mem = exit_return->in( TypeFunc::Memory );
2427 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2428 if (mms.is_empty()) {
2429 // get a copy of the base memory, and patch just this one input
2430 const TypePtr* adr_type = mms.adr_type(C);
2431 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2432 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2433 gvn().set_type_bottom(phi);
2434 phi->del_req(phi->req()-1); // prepare to re-patch
2435 mms.set_memory(phi);
2436 }
2437 mms.memory()->add_req(mms.memory2());
2438 }
2439
2440 if (_first_return) {
2441 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2442 _first_return = false;
2443 } else {
2444 _exits.map()->merge_replaced_nodes_with(map());
2445 }
2446
2447 stop_and_kill_map(); // This CFG path dies here
2448 }
2449
2450
2451 //------------------------------add_safepoint----------------------------------
2452 void Parse::add_safepoint() {
2453 uint parms = TypeFunc::Parms+1;
2454
2455 // Clear out dead values from the debug info.
2456 kill_dead_locals();
2457
2458 // Clone the JVM State
2459 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);