1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compileLog.hpp"
26 #include "interpreter/linkResolver.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/method.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/c2compiler.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/idealGraphPrinter.hpp"
33 #include "opto/locknode.hpp"
34 #include "opto/memnode.hpp"
35 #include "opto/opaquenode.hpp"
36 #include "opto/parse.hpp"
37 #include "opto/rootnode.hpp"
38 #include "opto/runtime.hpp"
39 #include "opto/type.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/safepointMechanism.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "utilities/bitMap.inline.hpp"
44 #include "utilities/copy.hpp"
45
46 // Static array so we can figure out which bytecodes most often stop us from
47 // compiling. Some of the non-static variables are needed in bytecodeInfo.cpp
48 // and eventually should be encapsulated in a proper class (gri 8/18/98).
49
50 #ifndef PRODUCT
51 uint nodes_created = 0;
52 uint methods_parsed = 0;
53 uint methods_seen = 0;
54 uint blocks_parsed = 0;
55 uint blocks_seen = 0;
56
57 uint explicit_null_checks_inserted = 0;
58 uint explicit_null_checks_elided = 0;
59 uint all_null_checks_found = 0;
84 }
85 if (all_null_checks_found) {
86 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
87 (100*implicit_null_checks)/all_null_checks_found);
88 }
89 if (SharedRuntime::_implicit_null_throws) {
90 tty->print_cr("%u implicit null exceptions at runtime",
91 SharedRuntime::_implicit_null_throws);
92 }
93
94 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
95 BytecodeParseHistogram::print();
96 }
97 }
98 #endif
99
100 //------------------------------ON STACK REPLACEMENT---------------------------
101
102 // Construct a node which can be used to get incoming state for
103 // on stack replacement.
104 Node *Parse::fetch_interpreter_state(int index,
105 BasicType bt,
106 Node *local_addrs,
107 Node *local_addrs_base) {
108 Node *mem = memory(Compile::AliasIdxRaw);
109 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
110 Node *ctl = control();
111
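// Slots in the OSR buffer grow toward lower addresses: slot #index sits
// 'index' words below (local_addrs_base + local_addrs), so each entry can
// be fetched with a raw-memory load at a constant negative offset.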
112   // Very similar to LoadNode::make, except we handle unaligned longs and
113   // doubles on SPARC. Intel can handle them just fine directly.
114 Node *l = nullptr;
115 switch (bt) { // Signature is flattened
116 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
117 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
118 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
119 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
120 case T_LONG:
121 case T_DOUBLE: {
122 // Since arguments are in reverse order, the argument address 'adr'
123 // refers to the back half of the long/double. Recompute adr.
124 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
125 if (Matcher::misaligned_doubles_ok) {
126 l = (bt == T_DOUBLE)
127 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
128 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
129 } else {
130 l = (bt == T_DOUBLE)
131 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
132 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
133 }
134 break;
135 }
136 default: ShouldNotReachHere();
137 }
138 return _gvn.transform(l);
139 }
140
141 // Helper routine to prevent the interpreter from handing
142 // unexpected typestate to an OSR method.
143 // The Node l is a value newly dug out of the interpreter frame.
144 // The type is the type predicted by ciTypeFlow. Note that it is
145 // not a general type, but can only come from Type::get_typeflow_type.
146 // The safepoint is a map which will feed an uncommon trap.
147 Node* Parse::check_interpreter_type(Node* l, const Type* type,
148 SafePointNode* &bad_type_exit) {
149
150 const TypeOopPtr* tp = type->isa_oopptr();
151
152 // TypeFlow may assert null-ness if a type appears unloaded.
153 if (type == TypePtr::NULL_PTR ||
154 (tp != nullptr && !tp->is_loaded())) {
155 // Value must be null, not a real oop.
156 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
157 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
158 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
159 set_control(_gvn.transform( new IfTrueNode(iff) ));
160 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
161 bad_type_exit->control()->add_req(bad_type);
162 l = null();
163 }
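// On the surviving path the value is simply replaced by null; the failing
// path has been queued on bad_type_exit and will feed the uncommon trap
// built at the end of load_interpreter_state.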
164
165 // Typeflow can also cut off paths from the CFG, based on
166 // types which appear unloaded, or call sites which appear unlinked.
167 // When paths are cut off, values at later merge points can rise
168 // toward more specific classes. Make sure these specific classes
169 // are still in effect.
170 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
171 // TypeFlow asserted a specific object type. Value must have that type.
172 Node* bad_type_ctrl = nullptr;
173 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
174 bad_type_exit->control()->add_req(bad_type_ctrl);
175 }
176
177 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
178 return l;
179 }
180
181 // Helper routine which sets up elements of the initial parser map when
182 // performing a parse for on stack replacement. Add values into map.
183 // The only parameter contains the address of the interpreter arguments.
184 void Parse::load_interpreter_state(Node* osr_buf) {
185 int index;
186 int max_locals = jvms()->loc_size();
187 int max_stack = jvms()->stk_size();
188
189
190 // Mismatch between method and jvms can occur since map briefly held
191 // an OSR entry state (which takes up one RawPtr word).
192 assert(max_locals == method()->max_locals(), "sanity");
193 assert(max_stack >= method()->max_stack(), "sanity");
194 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
195 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
196
197 // Find the start block.
198 Block* osr_block = start_block();
199 assert(osr_block->start() == osr_bci(), "sanity");
200
201 // Set initial BCI.
202 set_parse_bci(osr_block->start());
203
204 // Set initial stack depth.
205 set_sp(osr_block->start_sp());
206
207 // Check bailouts. We currently do not perform on stack replacement
208 // of loops in catch blocks or loops which branch with a non-empty stack.
209 if (sp() != 0) {
224 for (index = 0; index < mcnt; index++) {
225 // Make a BoxLockNode for the monitor.
226 BoxLockNode* osr_box = new BoxLockNode(next_monitor());
227 // Check for bailout after new BoxLockNode
228 if (failing()) { return; }
229
230     // This OSR locking region is unbalanced because it has no Lock node:
231     // the locking was done in the interpreter.
232     // This is similar to the Coarsened case, when a Lock node is eliminated
233     // and, as a result, the region is marked as Unbalanced.
234
235 // Emulate Coarsened state transition from Regular to Unbalanced.
236 osr_box->set_coarsened();
237 osr_box->set_unbalanced();
238
239 Node* box = _gvn.transform(osr_box);
240
241 // Displaced headers and locked objects are interleaved in the
242 // temp OSR buffer. We only copy the locked objects out here.
243 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
244 Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
245     // Try to copy the displaced header to the BoxNode
246 Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
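// For monitor #index the interpreter stored the locked object at buffer
// slot 2*index and its displaced mark word at slot 2*index+1, hence the
// paired fetches above.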
247
248
249 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
250
251 // Build a bogus FastLockNode (no code will be generated) and push the
252 // monitor into our debug info.
253 const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
254 map()->push_monitor(flock);
255
256 // If the lock is our method synchronization lock, tuck it away in
257 // _sync_lock for return and rethrow exit paths.
258 if (index == 0 && method()->is_synchronized()) {
259 _synch_lock = flock;
260 }
261 }
262
263 // Use the raw liveness computation to make sure that unexpected
264 // values don't propagate into the OSR frame.
265 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
266 if (!live_locals.is_valid()) {
267 // Degenerate or breakpointed method.
295 if (C->log() != nullptr) {
296 C->log()->elem("OSR_mismatch local_index='%d'",index);
297 }
298 set_local(index, null());
299 // and ignore it for the loads
300 continue;
301 }
302 }
303
304 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
305 if (type == Type::TOP || type == Type::HALF) {
306 continue;
307 }
308 // If the type falls to bottom, then this must be a local that
309 // is mixing ints and oops or some such. Forcing it to top
310 // makes it go dead.
311 if (type == Type::BOTTOM) {
312 continue;
313 }
314 // Construct code to access the appropriate local.
315 BasicType bt = type->basic_type();
316 if (type == TypePtr::NULL_PTR) {
317       // Ptr types are mixed together with T_ADDRESS, but null is
318       // really a T_OBJECT value, so correct the type here.
319 bt = T_OBJECT;
320 }
321 Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
322 set_local(index, value);
323 }
324
325 // Extract the needed stack entries from the interpreter frame.
326 for (index = 0; index < sp(); index++) {
327 const Type *type = osr_block->stack_type_at(index);
328 if (type != Type::TOP) {
329 // Currently the compiler bails out when attempting to on stack replace
330 // at a bci with a non-empty stack. We should not reach here.
331 ShouldNotReachHere();
332 }
333 }
334
335 // End the OSR migration
336 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
337 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
338 "OSR_migration_end", TypeRawPtr::BOTTOM,
339 osr_buf);
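// SharedRuntime::OSR_migration_end releases the temporary buffer that the
// interpreter filled in for us; all needed state has been copied out above.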
340
341 // Now that the interpreter state is loaded, make sure it will match
352 if (type->isa_oopptr() != nullptr) {
353 if (!live_oops.at(index)) {
354 // skip type check for dead oops
355 continue;
356 }
357 }
358 if (osr_block->flow()->local_type_at(index)->is_return_address()) {
359 // In our current system it's illegal for jsr addresses to be
360 // live into an OSR entry point because the compiler performs
361       // inlining of jsrs. ciTypeFlow has a bailout that detects this
362 // case and aborts the compile if addresses are live into an OSR
363 // entry point. Because of that we can assume that any address
364 // locals at the OSR entry point are dead. Method liveness
365 // isn't precise enough to figure out that they are dead in all
366       // cases so simply skip checking address locals
367       // altogether. Any type check is guaranteed to fail since the
368 // interpreter type is the result of a load which might have any
369 // value and the expected type is a constant.
370 continue;
371 }
372 set_local(index, check_interpreter_type(l, type, bad_type_exit));
373 }
374
375 for (index = 0; index < sp(); index++) {
376 if (stopped()) break;
377 Node* l = stack(index);
378 if (l->is_top()) continue; // nothing here
379 const Type *type = osr_block->stack_type_at(index);
380 set_stack(index, check_interpreter_type(l, type, bad_type_exit));
381 }
382
383 if (bad_type_exit->control()->req() > 1) {
384 // Build an uncommon trap here, if any inputs can be unexpected.
385 bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
386 record_for_igvn(bad_type_exit->control());
387 SafePointNode* types_are_good = map();
388 set_map(bad_type_exit);
389 // The unexpected type happens because a new edge is active
390 // in the CFG, which typeflow had previously ignored.
391 // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
392 // This x will be typed as Integer if notReached is not yet linked.
393 // It could also happen due to a problem in ciTypeFlow analysis.
394 uncommon_trap(Deoptimization::Reason_constraint,
395 Deoptimization::Action_reinterpret);
396 set_map(types_are_good);
397 }
398 }
399
400 //------------------------------Parse------------------------------------------
501 // either breakpoint setting or hotswapping of methods may
502 // cause deoptimization.
503 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
504 C->dependencies()->assert_evol_method(method());
505 }
506
507 NOT_PRODUCT(methods_seen++);
508
509 // Do some special top-level things.
510 if (depth() == 1 && C->is_osr_compilation()) {
511 _tf = C->tf(); // the OSR entry type is different
512 _entry_bci = C->entry_bci();
513 _flow = method()->get_osr_flow_analysis(osr_bci());
514 } else {
515 _tf = TypeFunc::make(method());
516 _entry_bci = InvocationEntryBci;
517 _flow = method()->get_flow_analysis();
518 }
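// For an OSR compile the entry signature is not the method's declared
// signature: the nmethod is entered with a single buffer pointer (see
// load_interpreter_state), hence the different TypeFunc and entry bci.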
519
520 if (_flow->failing()) {
521 assert(false, "type flow analysis failed during parsing");
522 C->record_method_not_compilable(_flow->failure_reason());
523 #ifndef PRODUCT
524 if (PrintOpto && (Verbose || WizardMode)) {
525 if (is_osr_parse()) {
526 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
527 } else {
528 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
529 }
530 if (Verbose) {
531 method()->print();
532 method()->print_codes();
533 _flow->print();
534 }
535 }
536 #endif
537 }
538
539 #ifdef ASSERT
540 if (depth() == 1) {
541 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
592 load_interpreter_state(osr_buf);
593 } else {
594 set_map(entry_map);
595 do_method_entry();
596 }
597
598 if (depth() == 1 && !failing()) {
599 if (C->clinit_barrier_on_entry()) {
600 // Add check to deoptimize the nmethod once the holder class is fully initialized
601 clinit_deopt();
602 }
603 }
604
605 // Check for bailouts during method entry.
606 if (failing()) {
607 if (log) log->done("parse");
608 C->set_default_node_notes(caller_nn);
609 return;
610 }
611
612 entry_map = map(); // capture any changes performed by method setup code
613 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
614
615 // We begin parsing as if we have just encountered a jump to the
616 // method entry.
617 Block* entry_block = start_block();
618 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
619 set_map_clone(entry_map);
620 merge_common(entry_block, entry_block->next_path_num());
621
622 #ifndef PRODUCT
623 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
624 set_parse_histogram( parse_histogram_obj );
625 #endif
626
627 // Parse all the basic blocks.
628 do_all_blocks();
629
630 // Check for bailouts during conversion to graph
631 if (failing()) {
777 void Parse::build_exits() {
778 // make a clone of caller to prevent sharing of side-effects
779 _exits.set_map(_exits.clone_map());
780 _exits.clean_stack(_exits.sp());
781 _exits.sync_jvms();
782
783 RegionNode* region = new RegionNode(1);
784 record_for_igvn(region);
785 gvn().set_type_bottom(region);
786 _exits.set_control(region);
787
788 // Note: iophi and memphi are not transformed until do_exits.
789 Node* iophi = new PhiNode(region, Type::ABIO);
790 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
791 gvn().set_type_bottom(iophi);
792 gvn().set_type_bottom(memphi);
793 _exits.set_i_o(iophi);
794 _exits.set_all_memory(memphi);
795
796 // Add a return value to the exit state. (Do not push it yet.)
797 if (tf()->range()->cnt() > TypeFunc::Parms) {
798 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
799 if (ret_type->isa_int()) {
800 BasicType ret_bt = method()->return_type()->basic_type();
801 if (ret_bt == T_BOOLEAN ||
802 ret_bt == T_CHAR ||
803 ret_bt == T_BYTE ||
804 ret_bt == T_SHORT) {
805 ret_type = TypeInt::INT;
806 }
807 }
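// Subword return values are widened to TypeInt::INT here; they are
// masked back to the declared width in do_exits() via mask_int_value().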
808
809 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
810 // becomes loaded during the subsequent parsing, the loaded and unloaded
811 // types will not join when we transform and push in do_exits().
812 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
813 if (ret_oop_type && !ret_oop_type->is_loaded()) {
814 ret_type = TypeOopPtr::BOTTOM;
815 }
816 int ret_size = type2size[ret_type->basic_type()];
817 Node* ret_phi = new PhiNode(region, ret_type);
818 gvn().set_type_bottom(ret_phi);
819 _exits.ensure_stack(ret_size);
820 assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
821 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
822 _exits.set_argument(0, ret_phi); // here is where the parser finds it
823 // Note: ret_phi is not yet pushed, until do_exits.
824 }
825 }
826
827
828 //----------------------------build_start_state-------------------------------
829 // Construct a state which contains only the incoming arguments from an
830 // unknown caller. The method & bci will be null & InvocationEntryBci.
831 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
832 int arg_size = tf->domain()->cnt();
833 int max_size = MAX2(arg_size, (int)tf->range()->cnt());
834 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
835 SafePointNode* map = new SafePointNode(max_size, jvms);
836 record_for_igvn(map);
837 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
838 Node_Notes* old_nn = default_node_notes();
839 if (old_nn != nullptr && has_method()) {
840 Node_Notes* entry_nn = old_nn->clone(this);
841 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
842 entry_jvms->set_offsets(0);
843 entry_jvms->set_bci(entry_bci());
844 entry_nn->set_jvms(entry_jvms);
845 set_default_node_notes(entry_nn);
846 }
847 uint i;
848 for (i = 0; i < (uint)arg_size; i++) {
849 Node* parm = initial_gvn()->transform(new ParmNode(start, i));
850 map->init_req(i, parm);
851 // Record all these guys for later GVN.
852 record_for_igvn(parm);
853 }
854 for (; i < map->req(); i++) {
855 map->init_req(i, top());
856 }
857 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
858 set_default_node_notes(old_nn);
859 jvms->set_map(map);
860 return jvms;
861 }
862
863 //-----------------------------make_node_notes---------------------------------
864 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
865 if (caller_nn == nullptr) return nullptr;
866 Node_Notes* nn = caller_nn->clone(C);
867 JVMState* caller_jvms = nn->jvms();
868 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
869 jvms->set_offsets(0);
870 jvms->set_bci(_entry_bci);
871 nn->set_jvms(jvms);
872 return nn;
873 }
874
875
876 //--------------------------return_values--------------------------------------
877 void Compile::return_values(JVMState* jvms) {
878 GraphKit kit(jvms);
879 Node* ret = new ReturnNode(TypeFunc::Parms,
880 kit.control(),
881 kit.i_o(),
882 kit.reset_memory(),
883 kit.frameptr(),
884 kit.returnadr());
885   // Add zero or one return value.
886 int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
887 if (ret_size > 0) {
888 kit.inc_sp(-ret_size); // pop the return value(s)
889 kit.sync_jvms();
890 ret->add_req(kit.argument(0));
891 // Note: The second dummy edge is not needed by a ReturnNode.
892 }
893 // bind it to root
894 root()->add_req(ret);
895 record_for_igvn(ret);
896 initial_gvn()->transform(ret);
897 }
898
899 //------------------------rethrow_exceptions-----------------------------------
900 // Bind all exception states in the list into a single RethrowNode.
901 void Compile::rethrow_exceptions(JVMState* jvms) {
902 GraphKit kit(jvms);
903 if (!kit.has_exceptions()) return; // nothing to generate
904 // Load my combined exception state into the kit, with all phis transformed:
905 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
906 Node* ex_oop = kit.use_exception_state(ex_map);
907 RethrowNode* exit = new RethrowNode(kit.control(),
908 kit.i_o(), kit.reset_memory(),
909 kit.frameptr(), kit.returnadr(),
910 // like a return but with exception input
911 ex_oop);
995 // to complete, we force all writes to complete.
996 //
997 // 2. Experimental VM option is used to force the barrier if any field
998 // was written out in the constructor.
999 //
1000 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1001 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1002 // MemBarVolatile is used before volatile load instead of after volatile
1003 // store, so there's no barrier after the store.
1004 // We want to guarantee the same behavior as on platforms with total store
1005 // order, although this is not required by the Java memory model.
1006 // In this case, we want to enforce visibility of volatile field
1007 // initializations which are performed in constructors.
1008 // So as with finals, we add a barrier here.
1009 //
1010 // "All bets are off" unless the first publication occurs after a
1011 // normal return from the constructor. We do not attempt to detect
1012 // such unusual early publications. But no barrier is needed on
1013 // exceptional returns, since they cannot publish normally.
1014 //
1015 if (method()->is_object_initializer() &&
1016 (wrote_final() || wrote_stable() ||
1017 (AlwaysSafeConstructors && wrote_fields()) ||
1018 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1019 Node* recorded_alloc = alloc_with_final_or_stable();
1020 _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1021 recorded_alloc);
1022
1023 // If Memory barrier is created for final fields write
1024 // and allocation node does not escape the initialize method,
1025 // then barrier introduced by allocation node can be removed.
1026 if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1027 AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1028 alloc->compute_MemBar_redundancy(method());
1029 }
1030 if (PrintOpto && (Verbose || WizardMode)) {
1031 method()->print_name();
1032 tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1033 }
1034 }
1035
1036 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1037 // transform each slice of the original memphi:
1038 mms.set_memory(_gvn.transform(mms.memory()));
1039 }
1040 // Clean up input MergeMems created by transforming the slices
1041 _gvn.transform(_exits.merged_memory());
1042
1043 if (tf()->range()->cnt() > TypeFunc::Parms) {
1044 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1045 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1046 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1047 // If the type we set for the ret_phi in build_exits() is too optimistic and
1048 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1049 // loading. It could also be due to an error, so mark this method as not compilable because
1050 // otherwise this could lead to an infinite compile loop.
1051 // In any case, this code path is rarely (and never in my testing) reached.
1052 C->record_method_not_compilable("Can't determine return type.");
1053 return;
1054 }
1055 if (ret_type->isa_int()) {
1056 BasicType ret_bt = method()->return_type()->basic_type();
1057 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1058 }
1059 _exits.push_node(ret_type->basic_type(), ret_phi);
1060 }
1061
1062 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1063
1064 // Unlock along the exceptional paths.
1118
1119 //-----------------------------create_entry_map-------------------------------
1120 // Initialize our parser map to contain the types at method entry.
1121 // For OSR, the map contains a single RawPtr parameter.
1122 // Initial monitor locking for sync. methods is performed by do_method_entry.
1123 SafePointNode* Parse::create_entry_map() {
1124 // Check for really stupid bail-out cases.
1125 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1126 if (len >= 32760) {
1127 // Bailout expected, this is a very rare edge case.
1128 C->record_method_not_compilable("too many local variables");
1129 return nullptr;
1130 }
1131
1132 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1133 _caller->map()->delete_replaced_nodes();
1134
1135 // If this is an inlined method, we may have to do a receiver null check.
1136 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1137 GraphKit kit(_caller);
1138 kit.null_check_receiver_before_call(method());
1139 _caller = kit.transfer_exceptions_into_jvms();
1140 if (kit.stopped()) {
1141 _exits.add_exception_states_from(_caller);
1142 _exits.set_jvms(_caller);
1143 return nullptr;
1144 }
1145 }
1146
1147 assert(method() != nullptr, "parser must have a method");
1148
1149 // Create an initial safepoint to hold JVM state during parsing
1150 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1151 set_map(new SafePointNode(len, jvms));
1152
1153 // Capture receiver info for compiled lambda forms.
1154 if (method()->is_compiled_lambda_form()) {
1155 ciInstance* recv_info = _caller->compute_receiver_info(method());
1156 jvms->set_receiver_info(recv_info);
1157 }
1158
1159 jvms->set_map(map());
1163 SafePointNode* inmap = _caller->map();
1164 assert(inmap != nullptr, "must have inmap");
1165 // In case of null check on receiver above
1166 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1167
1168 uint i;
1169
1170 // Pass thru the predefined input parameters.
1171 for (i = 0; i < TypeFunc::Parms; i++) {
1172 map()->init_req(i, inmap->in(i));
1173 }
1174
1175 if (depth() == 1) {
1176 assert(map()->memory()->Opcode() == Op_Parm, "");
1177 // Insert the memory aliasing node
1178 set_all_memory(reset_memory());
1179 }
1180 assert(merged_memory(), "");
1181
1182 // Now add the locals which are initially bound to arguments:
1183 uint arg_size = tf()->domain()->cnt();
1184 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1185 for (i = TypeFunc::Parms; i < arg_size; i++) {
1186 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1187 }
1188
1189 // Clear out the rest of the map (locals and stack)
1190 for (i = arg_size; i < len; i++) {
1191 map()->init_req(i, top());
1192 }
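// Everything past the incoming arguments starts out as top: non-argument
// locals and stack slots only become live once bytecodes store to them.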
1193
1194 SafePointNode* entry_map = stop();
1195 return entry_map;
1196 }
1197
1198 //-----------------------------do_method_entry--------------------------------
1199 // Emit any code needed in the pseudo-block before BCI zero.
1200 // The main thing to do is lock the receiver of a synchronized method.
1201 void Parse::do_method_entry() {
1202 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1203 set_sp(0); // Java Stack Pointer
1204
1205 NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1206
1207 if (C->env()->dtrace_method_probes()) {
1208 make_dtrace_method_entry(method());
1209 }
1210
1211 #ifdef ASSERT
1212 // Narrow receiver type when it is too broad for the method being parsed.
1213 if (!method()->is_static()) {
1214 ciInstanceKlass* callee_holder = method()->holder();
1215 const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);
1216
1217 Node* receiver_obj = local(0);
1218 const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1219
1220 if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
1221 // Receiver should always be a subtype of callee holder.
1222       // But, since the C2 type system doesn't properly track interfaces,
1223 // the invariant can't be expressed in the type system for default methods.
1224 // Example: for unrelated C <: I and D <: I, (C `meet` D) = Object </: I.
1225 assert(callee_holder->is_interface(), "missing subtype check");
1226
1236
1237 // If the method is synchronized, we need to construct a lock node, attach
1238 // it to the Start node, and pin it there.
1239 if (method()->is_synchronized()) {
1240 // Insert a FastLockNode right after the Start which takes as arguments
1241 // the current thread pointer, the "this" pointer & the address of the
1242 // stack slot pair used for the lock. The "this" pointer is a projection
1243 // off the start node, but the locking spot has to be constructed by
1244 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1245 // becomes the second argument to the FastLockNode call. The
1246 // FastLockNode becomes the new control parent to pin it to the start.
1247
1248 // Setup Object Pointer
1249 Node *lock_obj = nullptr;
1250 if (method()->is_static()) {
1251 ciInstance* mirror = _method->holder()->java_mirror();
1252 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1253 lock_obj = makecon(t_lock);
1254 } else { // Else pass the "this" pointer,
1255 lock_obj = local(0); // which is Parm0 from StartNode
1256 }
1257 // Clear out dead values from the debug info.
1258 kill_dead_locals();
1259 // Build the FastLockNode
1260 _synch_lock = shared_lock(lock_obj);
1261 // Check for bailout in shared_lock
1262 if (failing()) { return; }
1263 }
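// The matching unlock is emitted on every exit: return_current unlocks
// _synch_lock on normal returns, and the exceptional paths unlock it as
// well before rethrowing.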
1264
1265 // Feed profiling data for parameters to the type system so it can
1266 // propagate it as speculative types
1267 record_profiled_parameters_for_speculation();
1268 }
1269
1270 //------------------------------init_blocks------------------------------------
1271 // Initialize our parser map to contain the types/monitors at method entry.
1272 void Parse::init_blocks() {
1273 // Create the blocks.
1274 _block_count = flow()->block_count();
1275 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1671 //--------------------handle_missing_successor---------------------------------
1672 void Parse::handle_missing_successor(int target_bci) {
1673 #ifndef PRODUCT
1674 Block* b = block();
1675 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1676 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1677 #endif
1678 ShouldNotReachHere();
1679 }
1680
1681 //--------------------------merge_common---------------------------------------
1682 void Parse::merge_common(Parse::Block* target, int pnum) {
1683 if (TraceOptoParse) {
1684 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1685 }
1686
1687 // Zap extra stack slots to top
1688 assert(sp() == target->start_sp(), "");
1689 clean_stack(sp());
1690
1691 if (!target->is_merged()) { // No prior mapping at this bci
1692 if (TraceOptoParse) { tty->print(" with empty state"); }
1693
1694 // If this path is dead, do not bother capturing it as a merge.
1695     // It is "as if" we had one fewer predecessor from the beginning.
1696 if (stopped()) {
1697 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1698 return;
1699 }
1700
1701 // Make a region if we know there are multiple or unpredictable inputs.
1702 // (Also, if this is a plain fall-through, we might see another region,
1703 // which must not be allowed into this block's map.)
1704 if (pnum > PhiNode::Input // Known multiple inputs.
1705 || target->is_handler() // These have unpredictable inputs.
1706 || target->is_loop_head() // Known multiple inputs
1707 || control()->is_Region()) { // We must hide this guy.
1708
1709 int current_bci = bci();
1710 set_parse_bci(target->start()); // Set target bci
1725 record_for_igvn(r);
1726 // zap all inputs to null for debugging (done in Node(uint) constructor)
1727 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1728 r->init_req(pnum, control());
1729 set_control(r);
1730 target->copy_irreducible_status_to(r, jvms());
1731 set_parse_bci(current_bci); // Restore bci
1732 }
1733
1734 // Convert the existing Parser mapping into a mapping at this bci.
1735 store_state_to(target);
1736 assert(target->is_merged(), "do not come here twice");
1737
1738 } else { // Prior mapping at this bci
1739 if (TraceOptoParse) { tty->print(" with previous state"); }
1740 #ifdef ASSERT
1741 if (target->is_SEL_head()) {
1742 target->mark_merged_backedge(block());
1743 }
1744 #endif
1745 // We must not manufacture more phis if the target is already parsed.
1746 bool nophi = target->is_parsed();
1747
1748 SafePointNode* newin = map();// Hang on to incoming mapping
1749 Block* save_block = block(); // Hang on to incoming block;
1750 load_state_from(target); // Get prior mapping
1751
1752 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1753 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1754 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1755 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1756
1757 // Iterate over my current mapping and the old mapping.
1758 // Where different, insert Phi functions.
1759 // Use any existing Phi functions.
1760 assert(control()->is_Region(), "must be merging to a region");
1761 RegionNode* r = control()->as_Region();
1762
1763 // Compute where to merge into
1764 // Merge incoming control path
1765 r->init_req(pnum, newin->control());
1766
1767 if (pnum == 1) { // Last merge for this Region?
1768 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1769 Node* result = _gvn.transform(r);
1770 if (r != result && TraceOptoParse) {
1771 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1772 }
1773 }
1774 record_for_igvn(r);
1775 }
1776
1777 // Update all the non-control inputs to map:
1778 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1779 bool check_elide_phi = target->is_SEL_backedge(save_block);
1780 for (uint j = 1; j < newin->req(); j++) {
1781 Node* m = map()->in(j); // Current state of target.
1782 Node* n = newin->in(j); // Incoming change to target state.
1783 PhiNode* phi;
1784 if (m->is_Phi() && m->as_Phi()->region() == r)
1785 phi = m->as_Phi();
1786 else
1787 phi = nullptr;
1788 if (m != n) { // Different; must merge
1789 switch (j) {
1790         // Frame pointer and Return Address never change
1791 case TypeFunc::FramePtr:// Drop m, use the original value
1792 case TypeFunc::ReturnAdr:
1793 break;
1794 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1795 assert(phi == nullptr, "the merge contains phis, not vice versa");
1796 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1797 continue;
1798 default: // All normal stuff
1799 if (phi == nullptr) {
1800 const JVMState* jvms = map()->jvms();
1801 if (EliminateNestedLocks &&
1802 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1803             // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1804 // Use old BoxLock node as merged box.
1805 assert(newin->jvms()->is_monitor_box(j), "sanity");
1806 // This assert also tests that nodes are BoxLock.
1807 assert(BoxLockNode::same_slot(n, m), "sanity");
1814 // Incremental Inlining before EA and Macro nodes elimination.
1815 //
1816 // Incremental Inlining is executed after IGVN optimizations
1817 // during which BoxLock can be marked as Coarsened.
1818 old_box->set_coarsened(); // Verifies state
1819 old_box->set_unbalanced();
1820 }
1821 C->gvn_replace_by(n, m);
1822 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1823 phi = ensure_phi(j, nophi);
1824 }
1825 }
1826 break;
1827 }
1828 }
1829 // At this point, n might be top if:
1830 // - there is no phi (because TypeFlow detected a conflict), or
1831       // - the corresponding control edge is top (a dead incoming path)
1832 // It is a bug if we create a phi which sees a garbage value on a live path.
1833
1834 if (phi != nullptr) {
1835 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1836 assert(phi->region() == r, "");
1837 phi->set_req(pnum, n); // Then add 'n' to the merge
1838 if (pnum == PhiNode::Input) {
1839 // Last merge for this Phi.
1840 // So far, Phis have had a reasonable type from ciTypeFlow.
1841 // Now _gvn will join that with the meet of current inputs.
1842           // BOTTOM is never permissible here, because pessimistically
1843 // Phis of pointers cannot lose the basic pointer type.
1844 DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
1845 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1846 map()->set_req(j, _gvn.transform(phi));
1847 DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
1848 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1849 record_for_igvn(phi);
1850 }
1851 }
1852 } // End of for all values to be merged
1853
1854 if (pnum == PhiNode::Input &&
1855 !r->in(0)) { // The occasional useless Region
1856 assert(control() == r, "");
1857 set_control(r->nonnull_req());
1858 }
1859
1860 map()->merge_replaced_nodes_with(newin);
1861
1862 // newin has been subsumed into the lazy merge, and is now dead.
1863 set_block(save_block);
1864
1865 stop(); // done with this guy, for now
1866 }
1867
1868 if (TraceOptoParse) {
1869 tty->print_cr(" on path %d", pnum);
1870 }
1871
1872 // Done with this parser state.
1873 assert(stopped(), "");
1874 }
1875
1987
1988 // Add new path to the region.
1989 uint pnum = r->req();
1990 r->add_req(nullptr);
1991
1992 for (uint i = 1; i < map->req(); i++) {
1993 Node* n = map->in(i);
1994 if (i == TypeFunc::Memory) {
1995 // Ensure a phi on all currently known memories.
1996 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1997 Node* phi = mms.memory();
1998 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
1999 assert(phi->req() == pnum, "must be same size as region");
2000 phi->add_req(nullptr);
2001 }
2002 }
2003 } else {
2004 if (n->is_Phi() && n->as_Phi()->region() == r) {
2005 assert(n->req() == pnum, "must be same size as region");
2006 n->add_req(nullptr);
2007 }
2008 }
2009 }
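// Every phi hanging off this region must stay the same width as the
// region itself, so each gets a fresh null slot for the caller to fill.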
2010
2011 return pnum;
2012 }
2013
2014 //------------------------------ensure_phi-------------------------------------
2015 // Turn the idx'th entry of the current map into a Phi
2016 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2017 SafePointNode* map = this->map();
2018 Node* region = map->control();
2019 assert(region->is_Region(), "");
2020
2021 Node* o = map->in(idx);
2022 assert(o != nullptr, "");
2023
2024 if (o == top()) return nullptr; // TOP always merges into TOP
2025
2026 if (o->is_Phi() && o->as_Phi()->region() == region) {
2027 return o->as_Phi();
2028 }
2029
2030 // Now use a Phi here for merging
2031 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2032 const JVMState* jvms = map->jvms();
2033 const Type* t = nullptr;
2034 if (jvms->is_loc(idx)) {
2035 t = block()->local_type_at(idx - jvms->locoff());
2036 } else if (jvms->is_stk(idx)) {
2037 t = block()->stack_type_at(idx - jvms->stkoff());
2038 } else if (jvms->is_mon(idx)) {
2039 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2040 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2041 } else if ((uint)idx < TypeFunc::Parms) {
2042 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2043 } else {
2044 assert(false, "no type information for this phi");
2045 }
2046
2047 // If the type falls to bottom, then this must be a local that
2048 // is mixing ints and oops or some such. Forcing it to top
2049 // makes it go dead.
2050 if (t == Type::BOTTOM) {
2051 map->set_req(idx, top());
2052 return nullptr;
2053 }
2054
2055 // Do not create phis for top either.
2056   // A top on a non-null control flow must remain unused even after the phi.
2057 if (t == Type::TOP || t == Type::HALF) {
2058 map->set_req(idx, top());
2059 return nullptr;
2060 }
2061
2062 PhiNode* phi = PhiNode::make(region, o, t);
2063 gvn().set_type(phi, t);
2064 if (C->do_escape_analysis()) record_for_igvn(phi);
2065 map->set_req(idx, phi);
2066 return phi;
2067 }
2068
2069 //--------------------------ensure_memory_phi----------------------------------
2070 // Turn the idx'th slice of the current memory into a Phi
2071 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2072 MergeMemNode* mem = merged_memory();
2073 Node* region = control();
2074 assert(region->is_Region(), "");
2075
2076 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2077 assert(o != nullptr && o != top(), "");
2078
2079 PhiNode* phi;
2080 if (o->is_Phi() && o->as_Phi()->region() == region) {
2081 phi = o->as_Phi();
2082 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2083 // clone the shared base memory phi to make a new memory split
2084 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2085 const Type* t = phi->bottom_type();
2086 const TypePtr* adr_type = C->get_adr_type(idx);
2176 // Add check to deoptimize once holder klass is fully initialized.
2177 void Parse::clinit_deopt() {
2178 assert(C->has_method(), "only for normal compilations");
2179 assert(depth() == 1, "only for main compiled method");
2180 assert(is_normal_parse(), "no barrier needed on osr entry");
2181 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2182
2183 set_parse_bci(0);
2184
2185 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2186 guard_klass_being_initialized(holder);
2187 }
2188
2189 //------------------------------return_current---------------------------------
2190 // Append current _map to _exit_return
2191 void Parse::return_current(Node* value) {
2192 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2193 call_register_finalizer();
2194 }
2195
2196 // Do not set_parse_bci, so that return goo is credited to the return insn.
2197 set_bci(InvocationEntryBci);
2198 if (method()->is_synchronized()) {
2199 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2200 }
2201 if (C->env()->dtrace_method_probes()) {
2202 make_dtrace_method_exit(method());
2203 }
2204 SafePointNode* exit_return = _exits.map();
2205 exit_return->in( TypeFunc::Control )->add_req( control() );
2206 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2207 Node *mem = exit_return->in( TypeFunc::Memory );
2208 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2209 if (mms.is_empty()) {
2210 // get a copy of the base memory, and patch just this one input
2211 const TypePtr* adr_type = mms.adr_type(C);
2212 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2213 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2214 gvn().set_type_bottom(phi);
2215 phi->del_req(phi->req()-1); // prepare to re-patch
2216 mms.set_memory(phi);
2217 }
2218 mms.memory()->add_req(mms.memory2());
2219 }
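// A memory slice with no phi yet at this return point gets one lazily,
// cloned from the base memory phi, so each alias class merges its own
// state from every returning path.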
2220
2221 // frame pointer is always same, already captured
2222 if (value != nullptr) {
2223 // If returning oops to an interface-return, there is a silent free
2224 // cast from oop to interface allowed by the Verifier. Make it explicit
2225 // here.
2226 Node* phi = _exits.argument(0);
2227 phi->add_req(value);
2228 }
2229
2230 if (_first_return) {
2231 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2232 _first_return = false;
2233 } else {
2234 _exits.map()->merge_replaced_nodes_with(map());
2235 }
2236
2237 stop_and_kill_map(); // This CFG path dies here
2238 }
2239
2240
2241 //------------------------------add_safepoint----------------------------------
2242 void Parse::add_safepoint() {
2243 uint parms = TypeFunc::Parms+1;
2244
2245 // Clear out dead values from the debug info.
2246 kill_dead_locals();
2247
2248 // Clone the JVM State
2249 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
|
1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciObjArrayKlass.hpp"
26 #include "ci/ciSignature.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "interpreter/linkResolver.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "oops/method.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/c2compiler.hpp"
33 #include "opto/castnode.hpp"
34 #include "opto/convertnode.hpp"
35 #include "opto/idealGraphPrinter.hpp"
36 #include "opto/inlinetypenode.hpp"
37 #include "opto/locknode.hpp"
38 #include "opto/memnode.hpp"
39 #include "opto/opaquenode.hpp"
40 #include "opto/parse.hpp"
41 #include "opto/rootnode.hpp"
42 #include "opto/runtime.hpp"
43 #include "opto/type.hpp"
44 #include "runtime/arguments.hpp"
45 #include "runtime/handles.inline.hpp"
46 #include "runtime/safepointMechanism.hpp"
47 #include "runtime/sharedRuntime.hpp"
48 #include "utilities/bitMap.inline.hpp"
49 #include "utilities/copy.hpp"
50
51 // Static array so we can figure out which bytecodes stop us from compiling
52 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
53 // and eventually should be encapsulated in a proper class (gri 8/18/98).
54
55 #ifndef PRODUCT
56 uint nodes_created = 0;
57 uint methods_parsed = 0;
58 uint methods_seen = 0;
59 uint blocks_parsed = 0;
60 uint blocks_seen = 0;
61
62 uint explicit_null_checks_inserted = 0;
63 uint explicit_null_checks_elided = 0;
64 uint all_null_checks_found = 0;
89 }
90 if (all_null_checks_found) {
91 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
92 (100*implicit_null_checks)/all_null_checks_found);
93 }
94 if (SharedRuntime::_implicit_null_throws) {
95 tty->print_cr("%u implicit null exceptions at runtime",
96 SharedRuntime::_implicit_null_throws);
97 }
98
99 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
100 BytecodeParseHistogram::print();
101 }
102 }
103 #endif
104
105 //------------------------------ON STACK REPLACEMENT---------------------------
106
107 // Construct a node which can be used to get incoming state for
108 // on stack replacement.
109 Node* Parse::fetch_interpreter_state(int index,
110 const Type* type,
111 Node* local_addrs,
112 Node* local_addrs_base) {
113 BasicType bt = type->basic_type();
114 if (type == TypePtr::NULL_PTR) {
115 // Ptr types are mixed together with T_ADDRESS but nullptr is
116 // really for T_OBJECT types so correct it.
117 bt = T_OBJECT;
118 }
119 Node *mem = memory(Compile::AliasIdxRaw);
120 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
121 Node *ctl = control();
122
123 // Very similar to LoadNode::make, except we handle un-aligned longs and
124 // doubles on Sparc. Intel can handle them just fine directly.
125 Node *l = nullptr;
126 switch (bt) { // Signature is flattened
127 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
128 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
129 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
130 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
131 case T_LONG:
132 case T_DOUBLE: {
133 // Since arguments are in reverse order, the argument address 'adr'
134 // refers to the back half of the long/double. Recompute adr.
135 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
136 if (Matcher::misaligned_doubles_ok) {
137 l = (bt == T_DOUBLE)
138 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
139 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
140 } else {
141 l = (bt == T_DOUBLE)
142 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
143 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
144 }
145 break;
146 }
147 default: ShouldNotReachHere();
148 }
149 return _gvn.transform(l);
150 }
151
152 // Helper routine to prevent the interpreter from handing
153 // unexpected typestate to an OSR method.
154 // The Node l is a value newly dug out of the interpreter frame.
155 // The type is the type predicted by ciTypeFlow. Note that it is
156 // not a general type, but can only come from Type::get_typeflow_type.
157 // The safepoint is a map which will feed an uncommon trap.
158 Node* Parse::check_interpreter_type(Node* l, const Type* type, const TypeKlassPtr* klass_type,
159 SafePointNode* &bad_type_exit, bool is_early_larval) {
160 const TypeOopPtr* tp = type->isa_oopptr();
161
162 // TypeFlow may assert null-ness if a type appears unloaded.
163 if (type == TypePtr::NULL_PTR ||
164 (tp != nullptr && !tp->is_loaded())) {
165 // Value must be null, not a real oop.
166 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
167 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
168 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
169 set_control(_gvn.transform( new IfTrueNode(iff) ));
170 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
171 bad_type_exit->control()->add_req(bad_type);
172 l = null();
173 }
174
175 // Typeflow can also cut off paths from the CFG, based on
176 // types which appear unloaded, or call sites which appear unlinked.
177 // When paths are cut off, values at later merge points can rise
178 // toward more specific classes. Make sure these specific classes
179 // are still in effect.
180 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
181 // TypeFlow asserted a specific object type. Value must have that type.
182 Node* bad_type_ctrl = nullptr;
183 if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
184 // Check inline types for null here to prevent checkcast from adding an
185 // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
186 l = null_check_oop(l, &bad_type_ctrl);
187 bad_type_exit->control()->add_req(bad_type_ctrl);
188 }
189
190 l = gen_checkcast(l, makecon(klass_type), &bad_type_ctrl, false, is_early_larval);
191 bad_type_exit->control()->add_req(bad_type_ctrl);
192 }
193
194 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
195 return l;
196 }
197
198 // Helper routine which sets up elements of the initial parser map when
199 // performing a parse for on stack replacement. Add values into map.
200 // The only parameter contains the address of a interpreter arguments.
201 void Parse::load_interpreter_state(Node* osr_buf) {
202 int index;
203 int max_locals = jvms()->loc_size();
204 int max_stack = jvms()->stk_size();
205
206 // Mismatch between method and jvms can occur since map briefly held
207 // an OSR entry state (which takes up one RawPtr word).
208 assert(max_locals == method()->max_locals(), "sanity");
209 assert(max_stack >= method()->max_stack(), "sanity");
210 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
211 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
212
213 // Find the start block.
214 Block* osr_block = start_block();
215 assert(osr_block->start() == osr_bci(), "sanity");
216
217 // Set initial BCI.
218 set_parse_bci(osr_block->start());
219
220 // Set initial stack depth.
221 set_sp(osr_block->start_sp());
222
223 // Check bailouts. We currently do not perform on stack replacement
224 // of loops in catch blocks or loops which branch with a non-empty stack.
225 if (sp() != 0) {
240 for (index = 0; index < mcnt; index++) {
241 // Make a BoxLockNode for the monitor.
242 BoxLockNode* osr_box = new BoxLockNode(next_monitor());
243 // Check for bailout after new BoxLockNode
244 if (failing()) { return; }
245
246 // This OSR locking region is unbalanced because it does not have Lock node:
247 // locking was done in Interpreter.
248 // This is similar to Coarsened case when Lock node is eliminated
249 // and as result the region is marked as Unbalanced.
250
251 // Emulate Coarsened state transition from Regular to Unbalanced.
252 osr_box->set_coarsened();
253 osr_box->set_unbalanced();
254
255 Node* box = _gvn.transform(osr_box);
256
257 // Displaced headers and locked objects are interleaved in the
258 // temp OSR buffer. We only copy the locked objects out here.
259 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
260 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
261 // Try and copy the displaced header to the BoxNode
262 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
263
264 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
265
266 // Build a bogus FastLockNode (no code will be generated) and push the
267 // monitor into our debug info.
268 const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
269 map()->push_monitor(flock);
270
271 // If the lock is our method synchronization lock, tuck it away in
272 // _sync_lock for return and rethrow exit paths.
273 if (index == 0 && method()->is_synchronized()) {
274 _synch_lock = flock;
275 }
276 }
277
278 // Use the raw liveness computation to make sure that unexpected
279 // values don't propagate into the OSR frame.
280 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
281 if (!live_locals.is_valid()) {
282 // Degenerate or breakpointed method.
310 if (C->log() != nullptr) {
311 C->log()->elem("OSR_mismatch local_index='%d'",index);
312 }
313 set_local(index, null());
314 // and ignore it for the loads
315 continue;
316 }
317 }
318
319 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
320 if (type == Type::TOP || type == Type::HALF) {
321 continue;
322 }
323 // If the type falls to bottom, then this must be a local that
324 // is mixing ints and oops or some such. Forcing it to top
325 // makes it go dead.
326 if (type == Type::BOTTOM) {
327 continue;
328 }
329 // Construct code to access the appropriate local.
330 Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
331 set_local(index, value);
332 }
333
334 // Extract the needed stack entries from the interpreter frame.
335 for (index = 0; index < sp(); index++) {
336 const Type *type = osr_block->stack_type_at(index);
337 if (type != Type::TOP) {
338 // Currently the compiler bails out when attempting to on stack replace
339 // at a bci with a non-empty stack. We should not reach here.
340 ShouldNotReachHere();
341 }
342 }
343
344 // End the OSR migration
345 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
346 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
347 "OSR_migration_end", TypeRawPtr::BOTTOM,
348 osr_buf);
349
350 // Now that the interpreter state is loaded, make sure it will match
361 if (type->isa_oopptr() != nullptr) {
362 if (!live_oops.at(index)) {
363 // skip type check for dead oops
364 continue;
365 }
366 }
367 if (osr_block->flow()->local_type_at(index)->is_return_address()) {
368 // In our current system it's illegal for jsr addresses to be
369 // live into an OSR entry point because the compiler performs
370 // inlining of jsrs. ciTypeFlow has a bailout that detects this
371 // case and aborts the compile if addresses are live into an OSR
372 // entry point. Because of that we can assume that any address
373 // locals at the OSR entry point are dead. Method liveness
374 // isn't precise enough to figure out that they are dead in all
375 // cases, so simply skip checking address locals altogether.
376 // Any type check is guaranteed to fail since the
377 // interpreter type is the result of a load which might have any
378 // value and the expected type is a constant.
379 continue;
380 }
381 const TypeKlassPtr* klass_type = nullptr;
382 if (type->isa_oopptr()) {
383 klass_type = TypeKlassPtr::make(osr_block->flow()->local_type_at(index)->unwrap()->as_klass(), Type::ignore_interfaces);
384 klass_type = klass_type->try_improve();
385 }
386 bool is_early_larval = osr_block->flow()->local_type_at(index)->is_early_larval();
387 set_local(index, check_interpreter_type(l, type, klass_type, bad_type_exit, is_early_larval));
388 }
389
390 for (index = 0; index < sp(); index++) {
391 if (stopped()) break;
392 Node* l = stack(index);
393 if (l->is_top()) continue; // nothing here
394 const Type* type = osr_block->stack_type_at(index);
395 const TypeKlassPtr* klass_type = nullptr;
396 if (type->isa_oopptr()) {
397 klass_type = TypeKlassPtr::make(osr_block->flow()->stack_type_at(index)->unwrap()->as_klass(), Type::ignore_interfaces);
398 klass_type = klass_type->try_improve();
399 }
400 bool is_early_larval = osr_block->flow()->stack_type_at(index)->is_early_larval();
401 set_stack(index, check_interpreter_type(l, type, klass_type, bad_type_exit, is_early_larval));
402 }
403
404 if (bad_type_exit->control()->req() > 1) {
405 // Build an uncommon trap here, if any inputs can be unexpected.
406 bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
407 record_for_igvn(bad_type_exit->control());
408 SafePointNode* types_are_good = map();
409 set_map(bad_type_exit);
410 // The unexpected type happens because a new edge is active
411 // in the CFG, which typeflow had previously ignored.
412 // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
413 // This x will be typed as Integer if notReached is not yet linked.
414 // It could also happen due to a problem in ciTypeFlow analysis.
415 uncommon_trap(Deoptimization::Reason_constraint,
416 Deoptimization::Action_reinterpret);
417 set_map(types_are_good);
418 }
419 }
420
421 //------------------------------Parse------------------------------------------
522 // either breakpoint setting or hotswapping of methods may
523 // cause deoptimization.
524 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
525 C->dependencies()->assert_evol_method(method());
526 }
527
528 NOT_PRODUCT(methods_seen++);
529
530 // Do some special top-level things.
531 if (depth() == 1 && C->is_osr_compilation()) {
532 _tf = C->tf(); // the OSR entry type is different
533 _entry_bci = C->entry_bci();
534 _flow = method()->get_osr_flow_analysis(osr_bci());
535 } else {
536 _tf = TypeFunc::make(method());
537 _entry_bci = InvocationEntryBci;
538 _flow = method()->get_flow_analysis();
539 }
540
541 if (_flow->failing()) {
542 // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
543 // can lead to this. Re-enable once 8284443 is fixed.
544 //assert(false, "type flow analysis failed during parsing");
545 C->record_method_not_compilable(_flow->failure_reason());
546 #ifndef PRODUCT
547 if (PrintOpto && (Verbose || WizardMode)) {
548 if (is_osr_parse()) {
549 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
550 } else {
551 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
552 }
553 if (Verbose) {
554 method()->print();
555 method()->print_codes();
556 _flow->print();
557 }
558 }
559 #endif
560 }
561
562 #ifdef ASSERT
563 if (depth() == 1) {
564 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
615 load_interpreter_state(osr_buf);
616 } else {
617 set_map(entry_map);
618 do_method_entry();
619 }
620
621 if (depth() == 1 && !failing()) {
622 if (C->clinit_barrier_on_entry()) {
623 // Add check to deoptimize the nmethod once the holder class is fully initialized
624 clinit_deopt();
625 }
626 }
627
628 // Check for bailouts during method entry.
629 if (failing()) {
630 if (log) log->done("parse");
631 C->set_default_node_notes(caller_nn);
632 return;
633 }
634
635 // Handle inline type arguments
636 int arg_size = method()->arg_size();
637 for (int i = 0; i < arg_size; i++) {
638 Node* parm = local(i);
639 const Type* t = _gvn.type(parm);
640 if (t->is_inlinetypeptr()) {
641 // If the parameter is a value object, try to scalarize it if we know that it is unrestricted (not early larval).
642 // Parameters are non-larval, except the receiver of a constructor, which must be an early larval object.
643 if (!(method()->is_object_constructor() && i == 0)) {
644 // Create InlineTypeNode from the oop and replace the parameter
645 Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass());
646 replace_in_map(parm, vt);
647 }
648 } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && depth() == 1 && method()->has_vararg() && t->isa_aryptr()) {
649 // Speculate that a varargs Object array has the refined type of a default Object[] array.
650 // The assumption is that a vararg method test(Object... o) is usually called as
651 // test(o1, o2, o3): javac translates such a call so that the caller creates a new Object
652 // array, stores o1, o2, o3 into it, and then invokes test. This only makes sense if the
653 // method we are parsing is the top-level method of the compilation unit; otherwise, if it
654 // really is called this way, we would already know the exact type of the argument because
655 // the allocation happens inside the compilation unit (see the sketch after this loop).
656 const TypePtr* spec_type = (t->speculative() != nullptr) ? t->speculative() : t->remove_speculative()->is_aryptr();
657 ciSignature* method_signature = method()->signature();
658 ciType* parm_citype = method_signature->type_at(method_signature->count() - 1);
659 if (!parm_citype->is_obj_array_klass()) {
660 continue;
661 }
662
663 ciObjArrayKlass* spec_citype = ciObjArrayKlass::make(parm_citype->as_obj_array_klass()->element_klass(), true);
664 const Type* improved_spec_type = TypeKlassPtr::make(spec_citype, Type::trust_interfaces)->as_instance_type();
665 improved_spec_type = improved_spec_type->join(spec_type)->join(TypePtr::NOTNULL);
666 if (improved_spec_type->empty()) {
667 continue;
668 }
669
670 const TypePtr* improved_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, improved_spec_type->is_ptr());
671 improved_type = improved_type->join_speculative(t)->is_ptr();
672 if (improved_type != t) {
673 Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, improved_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing));
674 replace_in_map(parm, cast);
675 }
676 }
677 }
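// Sketch of the javac translation assumed by the varargs speculation above
// (hypothetical names):
//
//   static void test(Object... o) { ... }
//   test(o1, o2, o3);
//
// is compiled roughly as:
//
//   Object[] tmp = new Object[]{o1, o2, o3};
//   test(tmp);
//
// so the last parameter is very likely an exact, non-null Object[]
// instance, which is recorded as a speculative type on the parameter.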
678
679 entry_map = map(); // capture any changes performed by method setup code
680 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
681
682 // We begin parsing as if we have just encountered a jump to the
683 // method entry.
684 Block* entry_block = start_block();
685 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
686 set_map_clone(entry_map);
687 merge_common(entry_block, entry_block->next_path_num());
688
689 #ifndef PRODUCT
690 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
691 set_parse_histogram( parse_histogram_obj );
692 #endif
693
694 // Parse all the basic blocks.
695 do_all_blocks();
696
697 // Check for bailouts during conversion to graph
698 if (failing()) {
844 void Parse::build_exits() {
845 // make a clone of caller to prevent sharing of side-effects
846 _exits.set_map(_exits.clone_map());
847 _exits.clean_stack(_exits.sp());
848 _exits.sync_jvms();
849
850 RegionNode* region = new RegionNode(1);
851 record_for_igvn(region);
852 gvn().set_type_bottom(region);
853 _exits.set_control(region);
854
855 // Note: iophi and memphi are not transformed until do_exits.
856 Node* iophi = new PhiNode(region, Type::ABIO);
857 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
858 gvn().set_type_bottom(iophi);
859 gvn().set_type_bottom(memphi);
860 _exits.set_i_o(iophi);
861 _exits.set_all_memory(memphi);
862
863 // Add a return value to the exit state. (Do not push it yet.)
864 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
865 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
866 if (ret_type->isa_int()) {
867 BasicType ret_bt = method()->return_type()->basic_type();
868 if (ret_bt == T_BOOLEAN ||
869 ret_bt == T_CHAR ||
870 ret_bt == T_BYTE ||
871 ret_bt == T_SHORT) {
872 ret_type = TypeInt::INT;
873 }
874 }
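// Example: a method declared to return boolean still produces int values
// in the graph, so the phi is widened to TypeInt::INT here; do_exits()
// later masks the merged value back to the declared width via
// mask_int_value().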
875
876 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
877 // becomes loaded during the subsequent parsing, the loaded and unloaded
878 // types will not join when we transform and push in do_exits().
879 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
880 if (ret_oop_type && !ret_oop_type->is_loaded()) {
881 ret_type = TypeOopPtr::BOTTOM;
882 }
883 int ret_size = type2size[ret_type->basic_type()];
884 Node* ret_phi = new PhiNode(region, ret_type);
885 gvn().set_type_bottom(ret_phi);
886 _exits.ensure_stack(ret_size);
887 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
888 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
889 _exits.set_argument(0, ret_phi); // here is where the parser finds it
890 // Note: ret_phi is not yet pushed, until do_exits.
891 }
892 }
893
894 //----------------------------build_start_state-------------------------------
895 // Construct a state which contains only the incoming arguments from an
896 // unknown caller. The method & bci will be null & InvocationEntryBci.
897 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
898 int arg_size = tf->domain_sig()->cnt();
899 int max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
900 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
901 SafePointNode* map = new SafePointNode(max_size, jvms);
902 jvms->set_map(map);
903 record_for_igvn(map);
904 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
905 Node_Notes* old_nn = default_node_notes();
906 if (old_nn != nullptr && has_method()) {
907 Node_Notes* entry_nn = old_nn->clone(this);
908 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
909 entry_jvms->set_offsets(0);
910 entry_jvms->set_bci(entry_bci());
911 entry_nn->set_jvms(entry_jvms);
912 set_default_node_notes(entry_nn);
913 }
914 PhaseGVN& gvn = *initial_gvn();
915 uint i = 0;
916 int arg_num = 0;
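// Note on the two counters below: i walks the signature fields and map
// inputs, while j tracks the incoming ParmNode position. A scalarized
// inline-type argument consumes one incoming argument per field (j is
// assumed to be advanced inside make_from_multi), so the counters run
// independently.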
917 for (uint j = 0; i < (uint)arg_size; i++) {
918 const Type* t = tf->domain_sig()->field_at(i);
919 Node* parm = nullptr;
920 if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
921 // Inline type arguments are not passed by reference: we get an argument per
922 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
923 GraphKit kit(jvms, &gvn);
924 kit.set_control(map->control());
925 Node* old_mem = map->memory();
926 // Use immutable memory for inline type loads and restore it below
927 kit.set_all_memory(C->immutable_memory());
928 parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
929 map->set_control(kit.control());
930 map->set_memory(old_mem);
931 } else {
932 parm = gvn.transform(new ParmNode(start, j++));
933 }
934 map->init_req(i, parm);
935 // Record all these guys for later GVN.
936 record_for_igvn(parm);
937 if (i >= TypeFunc::Parms && t != Type::HALF) {
938 arg_num++;
939 }
940 }
941 for (; i < map->req(); i++) {
942 map->init_req(i, top());
943 }
944 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
945 set_default_node_notes(old_nn);
946 return jvms;
947 }
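// Hedged illustration of the scalarized convention handled above, using a
// hypothetical value class:
//
//   value class Point { int x; int y; }
//   static int len2(Point p) { return p.x * p.x + p.y * p.y; }
//
// With a scalarized calling convention the caller passes p's fields (plus
// a null marker when p may be null) as separate incoming arguments, and
// the loop above reassembles them into a single InlineTypeNode via
// InlineTypeNode::make_from_multi().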
948
949 //-----------------------------make_node_notes---------------------------------
950 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
951 if (caller_nn == nullptr) return nullptr;
952 Node_Notes* nn = caller_nn->clone(C);
953 JVMState* caller_jvms = nn->jvms();
954 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
955 jvms->set_offsets(0);
956 jvms->set_bci(_entry_bci);
957 nn->set_jvms(jvms);
958 return nn;
959 }
960
961
962 //--------------------------return_values--------------------------------------
963 void Compile::return_values(JVMState* jvms) {
964 GraphKit kit(jvms);
965 Node* ret = new ReturnNode(TypeFunc::Parms,
966 kit.control(),
967 kit.i_o(),
968 kit.reset_memory(),
969 kit.frameptr(),
970 kit.returnadr());
971 // Add zero or one return value
972 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
973 if (ret_size > 0) {
974 kit.inc_sp(-ret_size); // pop the return value(s)
975 kit.sync_jvms();
976 Node* res = kit.argument(0);
977 if (tf()->returns_inline_type_as_fields()) {
978 // Multiple return values (inline type fields): add as many edges
979 // to the Return node as returned values.
980 InlineTypeNode* vt = res->as_InlineType();
981 ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
982 if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
983 ret->init_req(TypeFunc::Parms, vt);
984 } else {
985 // Return the tagged klass pointer to signal scalarization to the caller
986 Node* tagged_klass = vt->tagged_klass(kit.gvn());
987 // Return null if the inline type is null (null marker field is not set)
988 Node* conv = kit.gvn().transform(new ConvI2LNode(vt->get_null_marker()));
989 Node* shl = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
990 Node* shr = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
991 tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
992 ret->init_req(TypeFunc::Parms, tagged_klass);
993 }
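// The shift pair above computes, branch-free,
//   mask = (long)null_marker << 63 >> 63   // arithmetic shift smears bit 0
// i.e. all ones when the null marker is set (the value is non-null) and
// zero otherwise, so the caller receives either the tagged klass pointer
// or null (0).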
994 uint idx = TypeFunc::Parms + 1;
995 vt->pass_fields(&kit, ret, idx, false, false);
996 } else {
997 ret->add_req(res);
998 // Note: The second dummy edge is not needed by a ReturnNode.
999 }
1000 }
1001 // bind it to root
1002 root()->add_req(ret);
1003 record_for_igvn(ret);
1004 initial_gvn()->transform(ret);
1005 }
1006
1007 //------------------------rethrow_exceptions-----------------------------------
1008 // Bind all exception states in the list into a single RethrowNode.
1009 void Compile::rethrow_exceptions(JVMState* jvms) {
1010 GraphKit kit(jvms);
1011 if (!kit.has_exceptions()) return; // nothing to generate
1012 // Load my combined exception state into the kit, with all phis transformed:
1013 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
1014 Node* ex_oop = kit.use_exception_state(ex_map);
1015 RethrowNode* exit = new RethrowNode(kit.control(),
1016 kit.i_o(), kit.reset_memory(),
1017 kit.frameptr(), kit.returnadr(),
1018 // like a return but with exception input
1019 ex_oop);
1103 // to complete, we force all writes to complete.
1104 //
1105 // 2. An experimental VM option is used to force the barrier if any field
1106 // was written out in the constructor.
1107 //
1108 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1109 // support_IRIW_for_not_multiple_copy_atomic_cpu arranges for the
1110 // MemBarVolatile to be emitted before a volatile load instead of after a
1111 // volatile store, so there is no barrier after the store.
1112 // We want to guarantee the same behavior as on platforms with total store
1113 // order, although this is not required by the Java memory model.
1114 // In this case, we want to enforce visibility of volatile field
1115 // initializations which are performed in constructors.
1116 // So as with finals, we add a barrier here.
1117 //
1118 // "All bets are off" unless the first publication occurs after a
1119 // normal return from the constructor. We do not attempt to detect
1120 // such unusual early publications. But no barrier is needed on
1121 // exceptional returns, since they cannot publish normally.
1122 //
1123 if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
1124 (wrote_final() || wrote_stable() ||
1125 (AlwaysSafeConstructors && wrote_fields()) ||
1126 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1127 Node* recorded_alloc = alloc_with_final_or_stable();
1128 _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1129 recorded_alloc);
1130
1131 // If a memory barrier is created for final field writes
1132 // and the allocation node does not escape the initializer method,
1133 // then the barrier introduced by the allocation node can be removed.
1134 if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1135 AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1136 alloc->compute_MemBar_redundancy(method());
1137 }
1138 if (PrintOpto && (Verbose || WizardMode)) {
1139 method()->print_name();
1140 tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1141 }
1142 }
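// Classic publication example the barrier above guards against (sketch):
//
//   class Holder { final int x; Holder() { x = 42; } }
//   // Thread A:  shared = new Holder();
//   // Thread B:  Holder h = shared; if (h != null) use(h.x);
//
// Without a release-style barrier at constructor exit, thread B could
// observe the default value of x on a weakly ordered machine.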
1143
1144 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1145 // transform each slice of the original memphi:
1146 mms.set_memory(_gvn.transform(mms.memory()));
1147 }
1148 // Clean up input MergeMems created by transforming the slices
1149 _gvn.transform(_exits.merged_memory());
1150
1151 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1152 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1153 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1154 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1155 // If the type we set for the ret_phi in build_exits() is too optimistic and
1156 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1157 // loading. It could also be due to an error, so mark this method as not compilable because
1158 // otherwise this could lead to an infinite compile loop.
1159 // In any case, this code path is rarely (and never in my testing) reached.
1160 C->record_method_not_compilable("Can't determine return type.");
1161 return;
1162 }
1163 if (ret_type->isa_int()) {
1164 BasicType ret_bt = method()->return_type()->basic_type();
1165 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1166 }
1167 _exits.push_node(ret_type->basic_type(), ret_phi);
1168 }
1169
1170 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1171
1172 // Unlock along the exceptional paths.
1226
1227 //-----------------------------create_entry_map-------------------------------
1228 // Initialize our parser map to contain the types at method entry.
1229 // For OSR, the map contains a single RawPtr parameter.
1230 // Initial monitor locking for sync. methods is performed by do_method_entry.
1231 SafePointNode* Parse::create_entry_map() {
1232 // Check for really stupid bail-out cases.
1233 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1234 if (len >= 32760) {
1235 // Bailout expected, this is a very rare edge case.
1236 C->record_method_not_compilable("too many local variables");
1237 return nullptr;
1238 }
1239
1240 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1241 _caller->map()->delete_replaced_nodes();
1242
1243 // If this is an inlined method, we may have to do a receiver null check.
1244 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1245 GraphKit kit(_caller);
1246 Node* receiver = kit.argument(0);
1247 Node* null_free = kit.null_check_receiver_before_call(method());
1248 _caller = kit.transfer_exceptions_into_jvms();
1249
1250 if (kit.stopped()) {
1251 _exits.add_exception_states_from(_caller);
1252 _exits.set_jvms(_caller);
1253 return nullptr;
1254 }
1255 }
1256
1257 assert(method() != nullptr, "parser must have a method");
1258
1259 // Create an initial safepoint to hold JVM state during parsing
1260 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1261 set_map(new SafePointNode(len, jvms));
1262
1263 // Capture receiver info for compiled lambda forms.
1264 if (method()->is_compiled_lambda_form()) {
1265 ciInstance* recv_info = _caller->compute_receiver_info(method());
1266 jvms->set_receiver_info(recv_info);
1267 }
1268
1269 jvms->set_map(map());
1273 SafePointNode* inmap = _caller->map();
1274 assert(inmap != nullptr, "must have inmap");
1275 // In case of null check on receiver above
1276 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1277
1278 uint i;
1279
1280 // Pass thru the predefined input parameters.
1281 for (i = 0; i < TypeFunc::Parms; i++) {
1282 map()->init_req(i, inmap->in(i));
1283 }
1284
1285 if (depth() == 1) {
1286 assert(map()->memory()->Opcode() == Op_Parm, "");
1287 // Insert the memory aliasing node
1288 set_all_memory(reset_memory());
1289 }
1290 assert(merged_memory(), "");
1291
1292 // Now add the locals which are initially bound to arguments:
1293 uint arg_size = tf()->domain_sig()->cnt();
1294 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1295 for (i = TypeFunc::Parms; i < arg_size; i++) {
1296 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1297 }
1298
1299 // Clear out the rest of the map (locals and stack)
1300 for (i = arg_size; i < len; i++) {
1301 map()->init_req(i, top());
1302 }
1303
1304 SafePointNode* entry_map = stop();
1305 return entry_map;
1306 }
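// Rough layout of the entry map built above (sketch):
//   [0, TypeFunc::Parms)         control, i/o, memory, frame ptr, ret addr
//   [TypeFunc::Parms, arg_size)  incoming arguments, the initial locals
//   [arg_size, len)              remaining locals and stack, set to top()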
1307
1308 //-----------------------------do_method_entry--------------------------------
1309 // Emit any code needed in the pseudo-block before BCI zero.
1310 // The main thing to do is lock the receiver of a synchronized method.
1311 void Parse::do_method_entry() {
1312 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1313 set_sp(0); // Java Stack Pointer
1314
1315 NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1316
1317 // Check if we need a membar at the beginning of the java.lang.Object
1318 // constructor to satisfy the memory model for strict fields.
1319 if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1320 Node* receiver_obj = local(0);
1321 const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1322 // If there's no exact type, check if the declared type has no implementors and add a dependency
1323 const TypeKlassPtr* klass_ptr = receiver_type->as_klass_type(/* try_for_exact= */ true);
1324 ciType* klass = klass_ptr->klass_is_exact() ? klass_ptr->exact_klass() : nullptr;
1325 if (klass != nullptr && klass->is_instance_klass()) {
1326 // Exact receiver type, check if there is a strict field
1327 ciInstanceKlass* holder = klass->as_instance_klass();
1328 for (int i = 0; i < holder->nof_nonstatic_fields(); i++) {
1329 ciField* field = holder->nonstatic_field_at(i);
1330 if (field->is_strict()) {
1331 // Found a strict field, a membar is needed
1332 AllocateNode* alloc = AllocateNode::Ideal_allocation(receiver_obj);
1333 insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease, receiver_obj);
1334 if (DoEscapeAnalysis && (alloc != nullptr)) {
1335 alloc->compute_MemBar_redundancy(method());
1336 }
1337 break;
1338 }
1339 }
1340 } else if (klass == nullptr) {
1341 // We can't statically determine the type of the receiver and therefore need
1342 // to put a membar here because it could have a strict field.
1343 insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease);
1344 }
1345 }
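// Hedged sketch (hypothetical Valhalla source): in a construction chain
// such as
//
//   value class P { /* strict */ int x; P() { x = 1; super(); } }
//
// strict fields are initialized before java.lang.Object.<init> runs, so a
// barrier here, at the end of every construction chain, keeps those
// initializing stores from being reordered with later publication.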
1346
1347 if (C->env()->dtrace_method_probes()) {
1348 make_dtrace_method_entry(method());
1349 }
1350
1351 #ifdef ASSERT
1352 // Narrow receiver type when it is too broad for the method being parsed.
1353 if (!method()->is_static()) {
1354 ciInstanceKlass* callee_holder = method()->holder();
1355 const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);
1356
1357 Node* receiver_obj = local(0);
1358 const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1359
1360 if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
1361 // Receiver should always be a subtype of callee holder.
1362 // But, since C2 type system doesn't properly track interfaces,
1363 // the invariant can't be expressed in the type system for default methods.
1364 // Example: for unrelated C <: I and D <: I, (C `meet` D) = Object </: I.
1365 assert(callee_holder->is_interface(), "missing subtype check");
1366
1376
1377 // If the method is synchronized, we need to construct a lock node, attach
1378 // it to the Start node, and pin it there.
1379 if (method()->is_synchronized()) {
1380 // Insert a FastLockNode right after the Start which takes as arguments
1381 // the current thread pointer, the "this" pointer & the address of the
1382 // stack slot pair used for the lock. The "this" pointer is a projection
1383 // off the start node, but the locking spot has to be constructed by
1384 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1385 // becomes the second argument to the FastLockNode call. The
1386 // FastLockNode becomes the new control parent to pin it to the start.
1387
1388 // Setup Object Pointer
1389 Node *lock_obj = nullptr;
1390 if (method()->is_static()) {
1391 ciInstance* mirror = _method->holder()->java_mirror();
1392 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1393 lock_obj = makecon(t_lock);
1394 } else { // Else pass the "this" pointer,
1395 lock_obj = local(0); // which is Parm0 from StartNode
1396 assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1397 }
1398 // Clear out dead values from the debug info.
1399 kill_dead_locals();
1400 // Build the FastLockNode
1401 _synch_lock = shared_lock(lock_obj);
1402 // Check for bailout in shared_lock
1403 if (failing()) { return; }
1404 }
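// E.g., for "synchronized int get() { return f; }" the parser takes the
// receiver lock here, before bci 0; the matching unlocks are emitted on
// the return and exception exit paths (cf. shared_unlock() in
// return_current()).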
1405
1406 // Feed profiling data for parameters to the type system so it can
1407 // propagate it as speculative types
1408 record_profiled_parameters_for_speculation();
1409 }
1410
1411 //------------------------------init_blocks------------------------------------
1412 // Create the parser's basic blocks and initialize them from ciTypeFlow.
1413 void Parse::init_blocks() {
1414 // Create the blocks.
1415 _block_count = flow()->block_count();
1416 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1812 //--------------------handle_missing_successor---------------------------------
1813 void Parse::handle_missing_successor(int target_bci) {
1814 #ifndef PRODUCT
1815 Block* b = block();
1816 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1817 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1818 #endif
1819 ShouldNotReachHere();
1820 }
1821
1822 //--------------------------merge_common---------------------------------------
1823 void Parse::merge_common(Parse::Block* target, int pnum) {
1824 if (TraceOptoParse) {
1825 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1826 }
1827
1828 // Zap extra stack slots to top
1829 assert(sp() == target->start_sp(), "");
1830 clean_stack(sp());
1831
1832 // Check for merge conflicts involving inline types
1833 JVMState* old_jvms = map()->jvms();
1834 int old_bci = bci();
1835 JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1836 tmp_jvms->set_should_reexecute(true);
1837 tmp_jvms->bind_map(map());
1838 // Execution needs to restart at the next bytecode (the entry of the
1839 // next block).
1840 if (target->is_merged() ||
1841 pnum > PhiNode::Input ||
1842 target->is_handler() ||
1843 target->is_loop_head()) {
1844 set_parse_bci(target->start());
1845 for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1846 Node* n = map()->in(j); // Incoming change to target state.
1847 const Type* t = nullptr;
1848 if (tmp_jvms->is_loc(j)) {
1849 t = target->local_type_at(j - tmp_jvms->locoff());
1850 } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1851 t = target->stack_type_at(j - tmp_jvms->stkoff());
1852 }
1853 if (t != nullptr && t != Type::BOTTOM) {
1854 // An object can appear in the JVMS as either an oop or an InlineTypeNode. If the merge is
1855 // an InlineTypeNode, we need all the merge inputs to be InlineTypeNodes. Else, if the
1856 // merge is an oop, each merge input needs to be either an oop or a buffered
1857 // InlineTypeNode.
1858 if (!t->is_inlinetypeptr()) {
1859 // The merge cannot be an InlineTypeNode, ensure the input is buffered if it is an
1860 // InlineTypeNode
1861 if (n->is_InlineType()) {
1862 map()->set_req(j, n->as_InlineType()->buffer(this));
1863 }
1864 } else {
1865 // Since the merge is a value object, it can either be an oop or an InlineTypeNode
1866 if (!target->is_merged()) {
1867 // This is the first processed input of the merge. If it is an InlineTypeNode, the
1868 // merge will be an InlineTypeNode. Else, try to scalarize so the merge can be
1869 // scalarized as well. However, we cannot blindly scalarize an inline type oop here
1870 // since it may be larval.
1871 if (!n->is_InlineType() && gvn().type(n)->is_zero_type()) {
1872 // Null constant implies that this is not a larval object
1873 map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
1874 }
1875 } else {
1876 Node* phi = target->start_map()->in(j);
1877 if (phi->is_InlineType()) {
1878 // Larval oops cannot be merged with non-larval ones, and since the merge point is
1879 // non-larval, n must be non-larval as well. As a result, we can scalarize n to merge
1880 // into the phi.
1881 if (!n->is_InlineType()) {
1882 map()->set_req(j, InlineTypeNode::make_from_oop(this, n, t->inline_klass()));
1883 }
1884 } else {
1885 // The merge is an oop phi, ensure the input is buffered if it is an InlineTypeNode
1886 if (n->is_InlineType()) {
1887 map()->set_req(j, n->as_InlineType()->buffer(this));
1888 }
1889 }
1890 }
1891 }
1892 }
1893 }
1894 }
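// Illustrative summary of the decisions above for a value-class slot
// merged from two paths (sketch):
//
//   path 1: n = InlineTypeNode (scalarized fields)
//   path 2: n = oop loaded from the heap (buffered)
//
// If the first processed input is an InlineTypeNode, the merge becomes an
// InlineTypeNode and later oop inputs are scalarized to match; otherwise
// InlineTypeNode inputs are buffered so the merge can stay an oop Phi.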
1895 old_jvms->bind_map(map());
1896 set_parse_bci(old_bci);
1897
1898 if (!target->is_merged()) { // No prior mapping at this bci
1899 if (TraceOptoParse) { tty->print(" with empty state"); }
1900
1901 // If this path is dead, do not bother capturing it as a merge.
1902 // It is "as if" we had 1 fewer predecessors from the beginning.
1903 if (stopped()) {
1904 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1905 return;
1906 }
1907
1908 // Make a region if we know there are multiple or unpredictable inputs.
1909 // (Also, if this is a plain fall-through, we might see another region,
1910 // which must not be allowed into this block's map.)
1911 if (pnum > PhiNode::Input // Known multiple inputs.
1912 || target->is_handler() // These have unpredictable inputs.
1913 || target->is_loop_head() // Known multiple inputs
1914 || control()->is_Region()) { // We must hide this guy.
1915
1916 int current_bci = bci();
1917 set_parse_bci(target->start()); // Set target bci
1932 record_for_igvn(r);
1933 // zap all inputs to null for debugging (done in Node(uint) constructor)
1934 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1935 r->init_req(pnum, control());
1936 set_control(r);
1937 target->copy_irreducible_status_to(r, jvms());
1938 set_parse_bci(current_bci); // Restore bci
1939 }
1940
1941 // Convert the existing Parser mapping into a mapping at this bci.
1942 store_state_to(target);
1943 assert(target->is_merged(), "do not come here twice");
1944
1945 } else { // Prior mapping at this bci
1946 if (TraceOptoParse) { tty->print(" with previous state"); }
1947 #ifdef ASSERT
1948 if (target->is_SEL_head()) {
1949 target->mark_merged_backedge(block());
1950 }
1951 #endif
1952
1953 // We must not manufacture more phis if the target is already parsed.
1954 bool nophi = target->is_parsed();
1955
1956 SafePointNode* newin = map(); // Hang on to incoming mapping
1957 Block* save_block = block(); // Hang on to incoming block
1958 load_state_from(target); // Get prior mapping
1959
1960 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1961 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1962 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1963 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1964
1965 // Iterate over my current mapping and the old mapping.
1966 // Where different, insert Phi functions.
1967 // Use any existing Phi functions.
1968 assert(control()->is_Region(), "must be merging to a region");
1969 RegionNode* r = control()->as_Region();
1970
1971 // Compute where to merge into
1972 // Merge incoming control path
1973 r->init_req(pnum, newin->control());
1974
1975 if (pnum == 1) { // Last merge for this Region?
1976 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1977 Node* result = _gvn.transform(r);
1978 if (r != result && TraceOptoParse) {
1979 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1980 }
1981 }
1982 record_for_igvn(r);
1983 }
1984
1985 // Update all the non-control inputs to map:
1986 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1987 bool check_elide_phi = target->is_SEL_backedge(save_block);
1988 bool last_merge = (pnum == PhiNode::Input);
1989 for (uint j = 1; j < newin->req(); j++) {
1990 Node* m = map()->in(j); // Current state of target.
1991 Node* n = newin->in(j); // Incoming change to target state.
1992 Node* phi;
1993 if (m->is_Phi() && m->as_Phi()->region() == r) {
1994 phi = m;
1995 } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1996 phi = m;
1997 } else {
1998 phi = nullptr;
1999 }
2000 if (m != n) { // Different; must merge
2001 switch (j) {
2002 // Frame pointer and Return Address never change
2003 case TypeFunc::FramePtr: // Drop m, use the original value
2004 case TypeFunc::ReturnAdr:
2005 break;
2006 case TypeFunc::Memory: // Merge inputs to the MergeMem node
2007 assert(phi == nullptr, "the merge contains phis, not vice versa");
2008 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
2009 continue;
2010 default: // All normal stuff
2011 if (phi == nullptr) {
2012 const JVMState* jvms = map()->jvms();
2013 if (EliminateNestedLocks &&
2014 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
2015 // BoxLock nodes are not commoned when EliminateNestedLocks is on.
2016 // Use old BoxLock node as merged box.
2017 assert(newin->jvms()->is_monitor_box(j), "sanity");
2018 // This assert also tests that nodes are BoxLock.
2019 assert(BoxLockNode::same_slot(n, m), "sanity");
2026 // Incremental Inlining before EA and Macro nodes elimination.
2027 //
2028 // Incremental Inlining is executed after IGVN optimizations
2029 // during which BoxLock can be marked as Coarsened.
2030 old_box->set_coarsened(); // Verifies state
2031 old_box->set_unbalanced();
2032 }
2033 C->gvn_replace_by(n, m);
2034 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
2035 phi = ensure_phi(j, nophi);
2036 }
2037 }
2038 break;
2039 }
2040 }
2041 // At this point, n might be top if:
2042 // - there is no phi (because TypeFlow detected a conflict), or
2043 // - the corresponding control edge is top (a dead incoming path)
2044 // It is a bug if we create a phi which sees a garbage value on a live path.
2045
2046 // Merging two inline types?
2047 if (phi != nullptr && phi->is_InlineType()) {
2048 // Reload current state because it may have been updated by ensure_phi
2049 assert(phi == map()->in(j), "unexpected value in map");
2050 assert(phi->as_InlineType()->has_phi_inputs(r), "");
2051 InlineTypeNode* vtm = phi->as_InlineType(); // Current inline type
2052 InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
2053 assert(vtm == phi, "Inline type should have Phi input");
2054
2055 #ifdef ASSERT
2056 if (TraceOptoParse) {
2057 tty->print_cr("\nMerging inline types");
2058 tty->print_cr("Current:");
2059 vtm->dump(2);
2060 tty->print_cr("Incoming:");
2061 vtn->dump(2);
2062 tty->cr();
2063 }
2064 #endif
2065 // Do the merge
2066 vtm->merge_with(&_gvn, vtn, pnum, last_merge);
2067 if (last_merge) {
2068 map()->set_req(j, _gvn.transform(vtm));
2069 record_for_igvn(vtm);
2070 }
2071 } else if (phi != nullptr) {
2072 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
2073 assert(phi->as_Phi()->region() == r, "");
2074 phi->set_req(pnum, n); // Then add 'n' to the merge
2075 if (last_merge) {
2076 // Last merge for this Phi.
2077 // So far, Phis have had a reasonable type from ciTypeFlow.
2078 // Now _gvn will join that with the meet of current inputs.
2079 // BOTTOM is never permissible here because, pessimistically,
2080 // Phis of pointers cannot lose the basic pointer type.
2081 DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
2082 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
2083 map()->set_req(j, _gvn.transform(phi));
2084 DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
2085 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
2086 record_for_igvn(phi);
2087 }
2088 }
2089 } // End of for all values to be merged
2090
2091 if (last_merge && !r->in(0)) { // The occasional useless Region
2092 assert(control() == r, "");
2093 set_control(r->nonnull_req());
2094 }
2095
2096 map()->merge_replaced_nodes_with(newin);
2097
2098 // newin has been subsumed into the lazy merge, and is now dead.
2099 set_block(save_block);
2100
2101 stop(); // done with this guy, for now
2102 }
2103
2104 if (TraceOptoParse) {
2105 tty->print_cr(" on path %d", pnum);
2106 }
2107
2108 // Done with this parser state.
2109 assert(stopped(), "");
2110 }
2111
2223
2224 // Add a new path to the region.
2225 uint pnum = r->req();
2226 r->add_req(nullptr);
2227
2228 for (uint i = 1; i < map->req(); i++) {
2229 Node* n = map->in(i);
2230 if (i == TypeFunc::Memory) {
2231 // Ensure a phi on all currently known memories.
2232 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2233 Node* phi = mms.memory();
2234 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2235 assert(phi->req() == pnum, "must be same size as region");
2236 phi->add_req(nullptr);
2237 }
2238 }
2239 } else {
2240 if (n->is_Phi() && n->as_Phi()->region() == r) {
2241 assert(n->req() == pnum, "must be same size as region");
2242 n->add_req(nullptr);
2243 } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2244 n->as_InlineType()->add_new_path(r);
2245 }
2246 }
2247 }
2248
2249 return pnum;
2250 }
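// The pnum returned above is the index of the input slot just added; the
// caller is expected to fill in that slot on the Region and on each phi
// that was grown.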
2251
2252 //------------------------------ensure_phi-------------------------------------
2253 // Turn the idx'th entry of the current map into a Phi
2254 Node* Parse::ensure_phi(int idx, bool nocreate) {
2255 SafePointNode* map = this->map();
2256 Node* region = map->control();
2257 assert(region->is_Region(), "");
2258
2259 Node* o = map->in(idx);
2260 assert(o != nullptr, "");
2261
2262 if (o == top()) return nullptr; // TOP always merges into TOP
2263
2264 if (o->is_Phi() && o->as_Phi()->region() == region) {
2265 return o->as_Phi();
2266 }
2267 InlineTypeNode* vt = o->isa_InlineType();
2268 if (vt != nullptr && vt->has_phi_inputs(region)) {
2269 return vt;
2270 }
2271
2272 // Now use a Phi here for merging
2273 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2274 const JVMState* jvms = map->jvms();
2275 const Type* t = nullptr;
2276 if (jvms->is_loc(idx)) {
2277 t = block()->local_type_at(idx - jvms->locoff());
2278 } else if (jvms->is_stk(idx)) {
2279 t = block()->stack_type_at(idx - jvms->stkoff());
2280 } else if (jvms->is_mon(idx)) {
2281 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2282 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2283 } else if ((uint)idx < TypeFunc::Parms) {
2284 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2285 } else {
2286 assert(false, "no type information for this phi");
2287 }
2288
2289 // If the type falls to bottom, then this must be a local that
2290 // is already dead or is mixing ints and oops or some such.
2291 // Forcing it to top makes it go dead.
2292 if (t == Type::BOTTOM) {
2293 map->set_req(idx, top());
2294 return nullptr;
2295 }
2296
2297 // Do not create phis for top either.
2298 // A top on a non-null control flow must be unused, even after the phi.
2299 if (t == Type::TOP || t == Type::HALF) {
2300 map->set_req(idx, top());
2301 return nullptr;
2302 }
2303
2304 if (vt != nullptr && t->is_inlinetypeptr()) {
2305 // Inline types are merged by merging their field values.
2306 // Create a cloned InlineTypeNode with phi inputs that
2307 // represents the merged inline type and update the map.
2308 vt = vt->clone_with_phis(&_gvn, region);
2309 map->set_req(idx, vt);
2310 return vt;
2311 } else {
2312 PhiNode* phi = PhiNode::make(region, o, t);
2313 gvn().set_type(phi, t);
2314 if (C->do_escape_analysis()) record_for_igvn(phi);
2315 map->set_req(idx, phi);
2316 return phi;
2317 }
2318 }
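// Usage sketch: merge_common() calls ensure_phi(j) for each map slot whose
// incoming value differs from the recorded state, then wires the new path
// with phi->set_req(pnum, n); on the last merge the phi is GVN-transformed
// and recorded for IGVN.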
2319
2320 //--------------------------ensure_memory_phi----------------------------------
2321 // Turn the idx'th slice of the current memory into a Phi
2322 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2323 MergeMemNode* mem = merged_memory();
2324 Node* region = control();
2325 assert(region->is_Region(), "");
2326
2327 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2328 assert(o != nullptr && o != top(), "");
2329
2330 PhiNode* phi;
2331 if (o->is_Phi() && o->as_Phi()->region() == region) {
2332 phi = o->as_Phi();
2333 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2334 // clone the shared base memory phi to make a new memory split
2335 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2336 const Type* t = phi->bottom_type();
2337 const TypePtr* adr_type = C->get_adr_type(idx);
2427 // Add check to deoptimize once holder klass is fully initialized.
2428 void Parse::clinit_deopt() {
2429 assert(C->has_method(), "only for normal compilations");
2430 assert(depth() == 1, "only for main compiled method");
2431 assert(is_normal_parse(), "no barrier needed on osr entry");
2432 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2433
2434 set_parse_bci(0);
2435
2436 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2437 guard_klass_being_initialized(holder);
2438 }
2439
2440 //------------------------------return_current---------------------------------
2441 // Append current _map to _exit_return
2442 void Parse::return_current(Node* value) {
2443 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2444 call_register_finalizer();
2445 }
2446
2447 // frame pointer is always same, already captured
2448 if (value != nullptr) {
2449 Node* phi = _exits.argument(0);
2450 const Type* return_type = phi->bottom_type();
2451 const TypeInstPtr* tr = return_type->isa_instptr();
2452 if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2453 return_type->is_inlinetypeptr()) {
2454 // Inline type is returned as fields, make sure it is scalarized
2455 if (!value->is_InlineType()) {
2456 value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass());
2457 }
2458 if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2459 // Returning from root or an incrementally inlined method. Make sure all non-flat
2460 // fields are buffered and re-execute if allocation triggers deoptimization.
2461 PreserveReexecuteState preexecs(this);
2462 assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2463 jvms()->set_should_reexecute(true);
2464 inc_sp(1);
2465 value = value->as_InlineType()->allocate_fields(this);
2466 }
2467 } else if (value->is_InlineType()) {
2468 // Inline type is returned as oop, make sure it is buffered and re-execute
2469 // if allocation triggers deoptimization.
2470 PreserveReexecuteState preexecs(this);
2471 jvms()->set_should_reexecute(true);
2472 inc_sp(1);
2473 value = value->as_InlineType()->buffer(this);
2474 }
2475 // ...else
2476 // If returning oops to an interface-return, there is a silent free
2477 // cast from oop to interface allowed by the Verifier. Make it explicit here.
2478 phi->add_req(value);
2479 }
2480
2481 // Do not set_parse_bci, so that return goo is credited to the return insn.
2482 set_bci(InvocationEntryBci);
2483 if (method()->is_synchronized()) {
2484 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2485 }
2486 if (C->env()->dtrace_method_probes()) {
2487 make_dtrace_method_exit(method());
2488 }
2489
2490 SafePointNode* exit_return = _exits.map();
2491 exit_return->in( TypeFunc::Control )->add_req( control() );
2492 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2493 Node *mem = exit_return->in( TypeFunc::Memory );
2494 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2495 if (mms.is_empty()) {
2496 // get a copy of the base memory, and patch just this one input
2497 const TypePtr* adr_type = mms.adr_type(C);
2498 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2499 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2500 gvn().set_type_bottom(phi);
2501 phi->del_req(phi->req()-1); // prepare to re-patch
2502 mms.set_memory(phi);
2503 }
2504 mms.memory()->add_req(mms.memory2());
2505 }
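// Sketch of the loop above: every memory slice live on this return path is
// appended as a new input to the matching exit phi; a slice the exit map
// has not seen yet first gets its own phi split off the base memory
// (force_memory/slice_memory) so the new input can be patched in.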
2506
2507 if (_first_return) {
2508 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2509 _first_return = false;
2510 } else {
2511 _exits.map()->merge_replaced_nodes_with(map());
2512 }
2513
2514 stop_and_kill_map(); // This CFG path dies here
2515 }
2516
2517
2518 //------------------------------add_safepoint----------------------------------
2519 void Parse::add_safepoint() {
2520 uint parms = TypeFunc::Parms+1;
2521
2522 // Clear out dead values from the debug info.
2523 kill_dead_locals();
2524
2525 // Clone the JVM State
2526 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
|