5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compileLog.hpp"
26 #include "interpreter/linkResolver.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/method.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/c2compiler.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/idealGraphPrinter.hpp"
33 #include "opto/locknode.hpp"
34 #include "opto/memnode.hpp"
35 #include "opto/opaquenode.hpp"
36 #include "opto/parse.hpp"
37 #include "opto/rootnode.hpp"
38 #include "opto/runtime.hpp"
39 #include "opto/type.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/safepointMechanism.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "utilities/bitMap.inline.hpp"
44 #include "utilities/copy.hpp"
45
46 // Static array so we can figure out which bytecodes stop us from compiling
47 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
48 // and eventually should be encapsulated in a proper class (gri 8/18/98).
49
50 #ifndef PRODUCT
51 uint nodes_created = 0;
52 uint methods_parsed = 0;
53 uint methods_seen = 0;
54 uint blocks_parsed = 0;
55 uint blocks_seen = 0;
56
57 uint explicit_null_checks_inserted = 0;
58 uint explicit_null_checks_elided = 0;
59 uint all_null_checks_found = 0;
84 }
85 if (all_null_checks_found) {
86 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
87 (100*implicit_null_checks)/all_null_checks_found);
88 }
89 if (SharedRuntime::_implicit_null_throws) {
90 tty->print_cr("%u implicit null exceptions at runtime",
91 SharedRuntime::_implicit_null_throws);
92 }
93
94 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
95 BytecodeParseHistogram::print();
96 }
97 }
98 #endif
99
100 //------------------------------ON STACK REPLACEMENT---------------------------
101
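// On-stack replacement (OSR) compiles a method that is already running in the
// interpreter, entering the compiled code at the bci of a hot loop head rather
// than at the normal method entry. The interpreter packs the live locals and
// monitors into a temporary OSR buffer; the routines below
// (fetch_interpreter_state, check_interpreter_type, load_interpreter_state)
// reload and type-check that state to build the initial parser map.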
102 // Construct a node which can be used to get incoming state for
103 // on stack replacement.
104 Node *Parse::fetch_interpreter_state(int index,
105 BasicType bt,
106 Node* local_addrs) {
107 Node* mem = memory(Compile::AliasIdxRaw);
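// Entries in the OSR temp buffer are addressed at negative word offsets from
// 'local_addrs', i.e. higher indices sit at lower addresses.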
108 Node* adr = off_heap_plus_addr(local_addrs, -index*wordSize);
109 Node* ctl = control();
110
111 // Very similar to LoadNode::make, except we handle un-aligned longs and
112 // doubles on Sparc. Intel can handle them just fine directly.
113 Node *l = nullptr;
114 switch (bt) { // Signature is flattened
115 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
116 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
117 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
118 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
119 case T_LONG:
120 case T_DOUBLE: {
121 // Since arguments are in reverse order, the argument address 'adr'
122 // refers to the back half of the long/double. Recompute adr.
123 adr = off_heap_plus_addr(local_addrs, -(index+1)*wordSize);
124 if (Matcher::misaligned_doubles_ok) {
125 l = (bt == T_DOUBLE)
126 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
127 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
128 } else {
129 l = (bt == T_DOUBLE)
130 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
131 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
132 }
133 break;
134 }
135 default: ShouldNotReachHere();
136 }
137 return _gvn.transform(l);
138 }
139
140 // Helper routine to prevent the interpreter from handing
141 // unexpected typestate to an OSR method.
142 // The Node l is a value newly dug out of the interpreter frame.
143 // The type is the type predicted by ciTypeFlow. Note that it is
144 // not a general type, but can only come from Type::get_typeflow_type.
145 // The safepoint is a map which will feed an uncommon trap.
146 Node* Parse::check_interpreter_type(Node* l, const Type* type,
147 SafePointNode* &bad_type_exit) {
148
149 const TypeOopPtr* tp = type->isa_oopptr();
150
151 // TypeFlow may assert null-ness if a type appears unloaded.
152 if (type == TypePtr::NULL_PTR ||
153 (tp != nullptr && !tp->is_loaded())) {
154 // Value must be null, not a real oop.
155 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
156 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
157 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
158 set_control(_gvn.transform( new IfTrueNode(iff) ));
159 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
160 bad_type_exit->control()->add_req(bad_type);
161 l = null();
162 }
163
164 // Typeflow can also cut off paths from the CFG, based on
165 // types which appear unloaded, or call sites which appear unlinked.
166 // When paths are cut off, values at later merge points can rise
167 // toward more specific classes. Make sure these specific classes
168 // are still in effect.
169 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
170 // TypeFlow asserted a specific object type. Value must have that type.
171 Node* bad_type_ctrl = nullptr;
172 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
173 bad_type_exit->control()->add_req(bad_type_ctrl);
174 }
175
176 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
177 return l;
178 }
179
180 // Helper routine which sets up elements of the initial parser map when
181 // performing a parse for on stack replacement. Add values into map.
182 // The only parameter contains the address of the interpreter arguments.
183 void Parse::load_interpreter_state(Node* osr_buf) {
184 int index;
185 int max_locals = jvms()->loc_size();
186 int max_stack = jvms()->stk_size();
187
188
189 // Mismatch between method and jvms can occur since map briefly held
190 // an OSR entry state (which takes up one RawPtr word).
191 assert(max_locals == method()->max_locals(), "sanity");
192 assert(max_stack >= method()->max_stack(), "sanity");
193 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
194 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
195
196 // Find the start block.
197 Block* osr_block = start_block();
198 assert(osr_block->start() == osr_bci(), "sanity");
199
200 // Set initial BCI.
201 set_parse_bci(osr_block->start());
202
203 // Set initial stack depth.
204 set_sp(osr_block->start_sp());
205
206 // Check bailouts. We currently do not perform on stack replacement
207 // of loops in catch blocks or loops which branch with a non-empty stack.
208 if (sp() != 0) {
223 for (index = 0; index < mcnt; index++) {
224 // Make a BoxLockNode for the monitor.
225 BoxLockNode* osr_box = new BoxLockNode(next_monitor());
226 // Check for bailout after new BoxLockNode
227 if (failing()) { return; }
228
229 // This OSR locking region is unbalanced because it does not have a Lock node:
230 // the locking was done in the interpreter.
231 // This is similar to the Coarsened case, when a Lock node is eliminated
232 // and, as a result, the region is marked as Unbalanced.
233
234 // Emulate Coarsened state transition from Regular to Unbalanced.
235 osr_box->set_coarsened();
236 osr_box->set_unbalanced();
237
238 Node* box = _gvn.transform(osr_box);
239
240 // Displaced headers and locked objects are interleaved in the
241 // temp OSR buffer. We only copy the locked objects out here.
242 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
243 Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr);
244 // Try and copy the displaced header to the BoxNode
245 Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr);
246
247
248 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
249
250 // Build a bogus FastLockNode (no code will be generated) and push the
251 // monitor into our debug info.
252 const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
253 map()->push_monitor(flock);
254
255 // If the lock is our method synchronization lock, tuck it away in
256 // _sync_lock for return and rethrow exit paths.
257 if (index == 0 && method()->is_synchronized()) {
258 _synch_lock = flock;
259 }
260 }
261
262 // Use the raw liveness computation to make sure that unexpected
263 // values don't propagate into the OSR frame.
264 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
265 if (!live_locals.is_valid()) {
266 // Degenerate or breakpointed method.
294 if (C->log() != nullptr) {
295 C->log()->elem("OSR_mismatch local_index='%d'",index);
296 }
297 set_local(index, null());
298 // and ignore it for the loads
299 continue;
300 }
301 }
302
303 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
304 if (type == Type::TOP || type == Type::HALF) {
305 continue;
306 }
307 // If the type falls to bottom, then this must be a local that
308 // is mixing ints and oops or some such. Forcing it to top
309 // makes it go dead.
310 if (type == Type::BOTTOM) {
311 continue;
312 }
313 // Construct code to access the appropriate local.
314 BasicType bt = type->basic_type();
315 if (type == TypePtr::NULL_PTR) {
316 // Ptr types are mixed together with T_ADDRESS but null is
317 // really for T_OBJECT types so correct it.
318 bt = T_OBJECT;
319 }
320 Node *value = fetch_interpreter_state(index, bt, locals_addr);
321 set_local(index, value);
322 }
323
324 // Extract the needed stack entries from the interpreter frame.
325 for (index = 0; index < sp(); index++) {
326 const Type *type = osr_block->stack_type_at(index);
327 if (type != Type::TOP) {
328 // Currently the compiler bails out when attempting to on stack replace
329 // at a bci with a non-empty stack. We should not reach here.
330 ShouldNotReachHere();
331 }
332 }
333
334 // End the OSR migration
335 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
336 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
337 "OSR_migration_end", TypeRawPtr::BOTTOM,
338 osr_buf);
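// SharedRuntime::OSR_migration_end() releases the temporary buffer the
// interpreter allocated for the migration, now that every value has been
// copied out into the compiled frame's state above.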
339
340 // Now that the interpreter state is loaded, make sure it will match
341 // at execution time what the compiler is expecting now:
342 SafePointNode* bad_type_exit = clone_map();
343 bad_type_exit->set_control(new RegionNode(1));
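// Every type check that fails in check_interpreter_type() adds its control
// edge to this region; if any edges accumulate, the region feeds the single
// uncommon trap emitted at the end of this method.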
344
345 assert(osr_block->flow()->jsrs()->size() == 0, "should be no jsrs live at osr point");
346 for (index = 0; index < max_locals; index++) {
347 if (stopped()) break;
348 Node* l = local(index);
349 if (l->is_top()) continue; // nothing here
350 const Type *type = osr_block->local_type_at(index);
351 if (type->isa_oopptr() != nullptr) {
352 if (!live_oops.at(index)) {
353 // skip type check for dead oops
354 continue;
355 }
356 }
357 if (osr_block->flow()->local_type_at(index)->is_return_address()) {
358 // In our current system it's illegal for jsr addresses to be
359 // live into an OSR entry point because the compiler performs
360 // inlining of jsrs. ciTypeFlow has a bailout that detects this
361 // case and aborts the compile if addresses are live into an OSR
362 // entry point. Because of that we can assume that any address
363 // locals at the OSR entry point are dead. Method liveness
364 // isn't precise enough to figure out that they are dead in all
365 // cases, so simply skip checking address locals altogether.
366 // Any type check is guaranteed to fail since the
367 // interpreter type is the result of a load which might have any
368 // value and the expected type is a constant.
369 continue;
370 }
371 set_local(index, check_interpreter_type(l, type, bad_type_exit));
372 if (StressReachabilityFences && type->isa_oopptr() != nullptr) {
373 // Keep all oop locals alive until the method returns as if there are
374 // reachability fences for them at the end of the method.
375 Node* loc = local(index);
376 if (loc->bottom_type() != TypePtr::NULL_PTR) {
377 assert(loc->bottom_type()->isa_oopptr() != nullptr, "%s", Type::str(loc->bottom_type()));
378 _stress_rf_hook->add_req(loc);
379 }
380 }
381 }
382
383 for (index = 0; index < sp(); index++) {
384 if (stopped()) break;
385 Node* l = stack(index);
386 if (l->is_top()) continue; // nothing here
387 const Type *type = osr_block->stack_type_at(index);
388 set_stack(index, check_interpreter_type(l, type, bad_type_exit));
389 if (StressReachabilityFences && type->isa_oopptr() != nullptr) {
390 // Keep all oops on stack alive until the method returns as if there are
391 // reachability fences for them at the end of the method.
392 Node* stk = stack(index);
393 if (stk->bottom_type() != TypePtr::NULL_PTR) {
394 assert(stk->bottom_type()->isa_oopptr() != nullptr, "%s", Type::str(stk->bottom_type()));
395 _stress_rf_hook->add_req(stk);
396 }
397 }
398 }
399
400 if (bad_type_exit->control()->req() > 1) {
401 // Build an uncommon trap here, if any inputs can be unexpected.
402 bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
403 record_for_igvn(bad_type_exit->control());
404 SafePointNode* types_are_good = map();
405 set_map(bad_type_exit);
406 // The unexpected type happens because a new edge is active
407 // in the CFG, which typeflow had previously ignored.
408 // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
409 // This x will be typed as Integer if notReached is not yet linked.
410 // It could also happen due to a problem in ciTypeFlow analysis.
411 uncommon_trap(Deoptimization::Reason_constraint,
412 Deoptimization::Action_reinterpret);
413 set_map(types_are_good);
414 }
415 }
416
417 //------------------------------Parse------------------------------------------
418 // Main parser constructor.
419 Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
420 : _exits(caller)
421 {
422 // Init some variables
423 _caller = caller;
424 _method = parse_method;
425 _expected_uses = expected_uses;
426 _depth = 1 + (caller->has_method() ? caller->depth() : 0);
427 _wrote_final = false;
428 _wrote_volatile = false;
429 _wrote_stable = false;
430 _wrote_fields = false;
431 _alloc_with_final_or_stable = nullptr;
432 _stress_rf_hook = (StressReachabilityFences ? new Node(1) : nullptr);
433 _block = nullptr;
434 _first_return = true;
435 _replaced_nodes_for_exceptions = false;
436 _new_idx = C->unique();
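// Nodes with _idx at or above _new_idx are created by this parse; the boundary
// is used when transferring and merging replaced-nodes information.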
437 DEBUG_ONLY(_entry_bci = UnknownBci);
438 DEBUG_ONLY(_block_count = -1);
439 DEBUG_ONLY(_blocks = (Block*)-1);
440 #ifndef PRODUCT
441 if (PrintCompilation || PrintOpto) {
442 // Make sure I have an inline tree, so I can print messages about it.
443 InlineTree::find_subtree_from_root(C->ilt(), caller, parse_method);
444 }
445 _max_switch_depth = 0;
446 _est_switch_depth = 0;
447 #endif
513 }
514
515 assert(InlineTree::check_can_parse(method()) == nullptr, "Can not parse this method, cutout earlier");
516 assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
517
518 // Always register dependence if JVMTI is enabled, because
519 // either breakpoint setting or hotswapping of methods may
520 // cause deoptimization.
521 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
522 C->dependencies()->assert_evol_method(method());
523 }
524
525 NOT_PRODUCT(methods_seen++);
526
527 // Do some special top-level things.
528 if (depth() == 1 && C->is_osr_compilation()) {
529 _tf = C->tf(); // the OSR entry type is different
530 _entry_bci = C->entry_bci();
531 _flow = method()->get_osr_flow_analysis(osr_bci());
532 } else {
533 _tf = TypeFunc::make(method());
534 _entry_bci = InvocationEntryBci;
535 _flow = method()->get_flow_analysis();
536 }
537
538 if (_flow->failing()) {
539 assert(false, "type flow analysis failed during parsing");
540 C->record_method_not_compilable(_flow->failure_reason());
541 #ifndef PRODUCT
542 if (PrintOpto && (Verbose || WizardMode)) {
543 if (is_osr_parse()) {
544 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
545 } else {
546 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
547 }
548 if (Verbose) {
549 method()->print();
550 method()->print_codes();
551 _flow->print();
552 }
553 }
800 void Parse::build_exits() {
801 // make a clone of caller to prevent sharing of side-effects
802 _exits.set_map(_exits.clone_map());
803 _exits.clean_stack(_exits.sp());
804 _exits.sync_jvms();
805
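// Control from every normal return path is merged into this region; see
// Parse::return_current(), which adds one edge per return.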
806 RegionNode* region = new RegionNode(1);
807 record_for_igvn(region);
808 gvn().set_type_bottom(region);
809 _exits.set_control(region);
810
811 // Note: iophi and memphi are not transformed until do_exits.
812 Node* iophi = new PhiNode(region, Type::ABIO);
813 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
814 gvn().set_type_bottom(iophi);
815 gvn().set_type_bottom(memphi);
816 _exits.set_i_o(iophi);
817 _exits.set_all_memory(memphi);
818
819 // Add a return value to the exit state. (Do not push it yet.)
820 if (tf()->range()->cnt() > TypeFunc::Parms) {
821 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
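// Subword integer returns (boolean, char, byte, short) are widened to int in
// the exit phi; do_exits() masks the value back down with mask_int_value()
// before pushing it.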
822 if (ret_type->isa_int()) {
823 BasicType ret_bt = method()->return_type()->basic_type();
824 if (ret_bt == T_BOOLEAN ||
825 ret_bt == T_CHAR ||
826 ret_bt == T_BYTE ||
827 ret_bt == T_SHORT) {
828 ret_type = TypeInt::INT;
829 }
830 }
831
832 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
833 // becomes loaded during the subsequent parsing, the loaded and unloaded
834 // types will not join when we transform and push in do_exits().
835 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
836 if (ret_oop_type && !ret_oop_type->is_loaded()) {
837 ret_type = TypeOopPtr::BOTTOM;
838 }
839 int ret_size = type2size[ret_type->basic_type()];
840 Node* ret_phi = new PhiNode(region, ret_type);
841 gvn().set_type_bottom(ret_phi);
842 _exits.ensure_stack(ret_size);
843 assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
844 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
845 _exits.set_argument(0, ret_phi); // here is where the parser finds it
846 // Note: ret_phi is not yet pushed, until do_exits.
847 }
848 }
849
850
851 //----------------------------build_start_state-------------------------------
852 // Construct a state which contains only the incoming arguments from an
853 // unknown caller. The method & bci will be null & InvocationEntryBci.
854 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
855 int arg_size = tf->domain()->cnt();
856 int max_size = MAX2(arg_size, (int)tf->range()->cnt());
857 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
858 SafePointNode* map = new SafePointNode(max_size, jvms);
859 record_for_igvn(map);
860 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
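// When node notes are in use, temporarily install notes carrying an
// entry-point JVMState so the ParmNodes created below are attributed to the
// method entry; the original notes are restored once the arguments are set up.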
861 Node_Notes* old_nn = default_node_notes();
862 if (old_nn != nullptr && has_method()) {
863 Node_Notes* entry_nn = old_nn->clone(this);
864 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
865 entry_jvms->set_offsets(0);
866 entry_jvms->set_bci(entry_bci());
867 entry_nn->set_jvms(entry_jvms);
868 set_default_node_notes(entry_nn);
869 }
870 uint i;
871 for (i = 0; i < (uint)arg_size; i++) {
872 Node* parm = initial_gvn()->transform(new ParmNode(start, i));
873 map->init_req(i, parm);
874 // Record all these guys for later GVN.
875 record_for_igvn(parm);
876 }
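// Any remaining map slots beyond the incoming arguments start out dead (top).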
877 for (; i < map->req(); i++) {
878 map->init_req(i, top());
879 }
880 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
881 set_default_node_notes(old_nn);
882 jvms->set_map(map);
883 return jvms;
884 }
885
886 //-----------------------------make_node_notes---------------------------------
887 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
888 if (caller_nn == nullptr) return nullptr;
889 Node_Notes* nn = caller_nn->clone(C);
890 JVMState* caller_jvms = nn->jvms();
891 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
892 jvms->set_offsets(0);
893 jvms->set_bci(_entry_bci);
894 nn->set_jvms(jvms);
895 return nn;
896 }
897
898
899 //--------------------------return_values--------------------------------------
900 void Compile::return_values(JVMState* jvms) {
901 GraphKit kit(jvms);
902 Node* ret = new ReturnNode(TypeFunc::Parms,
903 kit.control(),
904 kit.i_o(),
905 kit.reset_memory(),
906 kit.frameptr(),
907 kit.returnadr());
908 // Add zero or one return value
909 int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
910 if (ret_size > 0) {
911 kit.inc_sp(-ret_size); // pop the return value(s)
912 kit.sync_jvms();
913 ret->add_req(kit.argument(0));
914 // Note: The second dummy edge is not needed by a ReturnNode.
915 }
916 // bind it to root
917 root()->add_req(ret);
918 record_for_igvn(ret);
919 initial_gvn()->transform(ret);
920 }
921
922 //------------------------rethrow_exceptions-----------------------------------
923 // Bind all exception states in the list into a single RethrowNode.
924 void Compile::rethrow_exceptions(JVMState* jvms) {
925 GraphKit kit(jvms);
926 if (!kit.has_exceptions()) return; // nothing to generate
927 // Load my combined exception state into the kit, with all phis transformed:
928 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
929 Node* ex_oop = kit.use_exception_state(ex_map);
930 RethrowNode* exit = new RethrowNode(kit.control(),
931 kit.i_o(), kit.reset_memory(),
932 kit.frameptr(), kit.returnadr(),
933 // like a return but with exception input
934 ex_oop);
1018 // to complete, we force all writes to complete.
1019 //
1020 // 2. Experimental VM option is used to force the barrier if any field
1021 // was written out in the constructor.
1022 //
1023 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1024 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1025 // MemBarVolatile is used before volatile load instead of after volatile
1026 // store, so there's no barrier after the store.
1027 // We want to guarantee the same behavior as on platforms with total store
1028 // order, although this is not required by the Java memory model.
1029 // In this case, we want to enforce visibility of volatile field
1030 // initializations which are performed in constructors.
1031 // So as with finals, we add a barrier here.
1032 //
1033 // "All bets are off" unless the first publication occurs after a
1034 // normal return from the constructor. We do not attempt to detect
1035 // such unusual early publications. But no barrier is needed on
1036 // exceptional returns, since they cannot publish normally.
1037 //
1038 if (method()->is_object_initializer() &&
1039 (wrote_final() || wrote_stable() ||
1040 (AlwaysSafeConstructors && wrote_fields()) ||
1041 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1042 Node* recorded_alloc = alloc_with_final_or_stable();
1043 _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1044 recorded_alloc);
1045
1046 // If Memory barrier is created for final fields write
1047 // and allocation node does not escape the initialize method,
1048 // then barrier introduced by allocation node can be removed.
1049 if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1050 AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1051 alloc->compute_MemBar_redundancy(method());
1052 }
1053 if (PrintOpto && (Verbose || WizardMode)) {
1054 method()->print_name();
1055 tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1056 }
1057 }
1058
1059 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1060 // transform each slice of the original memphi:
1061 mms.set_memory(_gvn.transform(mms.memory()));
1062 }
1063 // Clean up input MergeMems created by transforming the slices
1064 _gvn.transform(_exits.merged_memory());
1065
1066 if (tf()->range()->cnt() > TypeFunc::Parms) {
1067 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1068 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1069 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1070 // If the type we set for the ret_phi in build_exits() is too optimistic and
1071 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1072 // loading. It could also be due to an error, so mark this method as not compilable because
1073 // otherwise this could lead to an infinite compile loop.
1074 // In any case, this code path is rarely (and never in my testing) reached.
1075 C->record_method_not_compilable("Can't determine return type.");
1076 return;
1077 }
1078 if (ret_type->isa_int()) {
1079 BasicType ret_bt = method()->return_type()->basic_type();
1080 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1081 }
1082 _exits.push_node(ret_type->basic_type(), ret_phi);
1083 }
1084
1085 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1086
1087 // Unlock along the exceptional paths.
1141
1142 //-----------------------------create_entry_map-------------------------------
1143 // Initialize our parser map to contain the types at method entry.
1144 // For OSR, the map contains a single RawPtr parameter.
1145 // Initial monitor locking for sync. methods is performed by do_method_entry.
1146 SafePointNode* Parse::create_entry_map() {
1147 // Check for really stupid bail-out cases.
1148 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1149 if (len >= 32760) {
1150 // Bailout expected, this is a very rare edge case.
1151 C->record_method_not_compilable("too many local variables");
1152 return nullptr;
1153 }
1154
1155 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1156 _caller->map()->delete_replaced_nodes();
1157
1158 // If this is an inlined method, we may have to do a receiver null check.
1159 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1160 GraphKit kit(_caller);
1161 kit.null_check_receiver_before_call(method());
1162 _caller = kit.transfer_exceptions_into_jvms();
1163 if (kit.stopped()) {
1164 _exits.add_exception_states_from(_caller);
1165 _exits.set_jvms(_caller);
1166 return nullptr;
1167 }
1168 }
1169
1170 assert(method() != nullptr, "parser must have a method");
1171
1172 // Create an initial safepoint to hold JVM state during parsing
1173 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1174 set_map(new SafePointNode(len, jvms));
1175
1176 // Capture receiver info for compiled lambda forms.
1177 if (method()->is_compiled_lambda_form()) {
1178 ciInstance* recv_info = _caller->compute_receiver_info(method());
1179 jvms->set_receiver_info(recv_info);
1180 }
1181
1182 jvms->set_map(map());
1186 SafePointNode* inmap = _caller->map();
1187 assert(inmap != nullptr, "must have inmap");
1188 // In case of null check on receiver above
1189 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1190
1191 uint i;
1192
1193 // Pass thru the predefined input parameters.
1194 for (i = 0; i < TypeFunc::Parms; i++) {
1195 map()->init_req(i, inmap->in(i));
1196 }
1197
1198 if (depth() == 1) {
1199 assert(map()->memory()->Opcode() == Op_Parm, "");
1200 // Insert the memory aliasing node
1201 set_all_memory(reset_memory());
1202 }
1203 assert(merged_memory(), "");
1204
1205 // Now add the locals which are initially bound to arguments:
1206 uint arg_size = tf()->domain()->cnt();
1207 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1208 for (i = TypeFunc::Parms; i < arg_size; i++) {
1209 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1210 }
1211
1212 // Clear out the rest of the map (locals and stack)
1213 for (i = arg_size; i < len; i++) {
1214 map()->init_req(i, top());
1215 }
1216
1217 SafePointNode* entry_map = stop();
1218 return entry_map;
1219 }
1220
1221 //-----------------------is_auto_boxed_primitive------------------------------
1222 // Helper method to detect auto-boxed primitives (result of valueOf() call).
1223 static bool is_auto_boxed_primitive(Node* n) {
1224 return (n->is_Proj() && n->as_Proj()->_con == TypeFunc::Parms &&
1225 n->in(0)->is_CallJava() &&
1226 n->in(0)->as_CallJava()->method()->is_boxing_method());
1227 }
1228
1229 //-----------------------------do_method_entry--------------------------------
1230 // Emit any code needed in the pseudo-block before BCI zero.
1231 // The main thing to do is lock the receiver of a synchronized method.
1232 void Parse::do_method_entry() {
1233 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1234 set_sp(0); // Java Stack Pointer
1235
1236 NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1237
1238 if (C->env()->dtrace_method_probes()) {
1239 make_dtrace_method_entry(method());
1240 }
1241
1242 if (StressReachabilityFences) {
1243 // Keep all oop arguments alive until the method returns as if there are
1244 // reachability fences for them at the end of the method.
1245 int max_locals = jvms()->loc_size();
1246 for (int idx = 0; idx < max_locals; idx++) {
1247 Node* loc = local(idx);
1248 if (loc->bottom_type()->isa_oopptr() != nullptr &&
1249 !is_auto_boxed_primitive(loc)) { // ignore auto-boxed primitives
1250 _stress_rf_hook->add_req(loc);
1251 }
1252 }
1253 }
1254
1255 #ifdef ASSERT
1256 // Narrow receiver type when it is too broad for the method being parsed.
1257 if (!method()->is_static()) {
1280
1281 // If the method is synchronized, we need to construct a lock node, attach
1282 // it to the Start node, and pin it there.
1283 if (method()->is_synchronized()) {
1284 // Insert a FastLockNode right after the Start which takes as arguments
1285 // the current thread pointer, the "this" pointer & the address of the
1286 // stack slot pair used for the lock. The "this" pointer is a projection
1287 // off the start node, but the locking spot has to be constructed by
1288 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1289 // becomes the second argument to the FastLockNode call. The
1290 // FastLockNode becomes the new control parent to pin it to the start.
1291
1292 // Setup Object Pointer
1293 Node *lock_obj = nullptr;
1294 if (method()->is_static()) {
1295 ciInstance* mirror = _method->holder()->java_mirror();
1296 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1297 lock_obj = makecon(t_lock);
1298 } else { // Else pass the "this" pointer,
1299 lock_obj = local(0); // which is Parm0 from StartNode
1300 }
1301 // Clear out dead values from the debug info.
1302 kill_dead_locals();
1303 // Build the FastLockNode
1304 _synch_lock = shared_lock(lock_obj);
1305 // Check for bailout in shared_lock
1306 if (failing()) { return; }
1307 }
1308
1309 // Feed profiling data for parameters to the type system so it can
1310 // propagate it as speculative types
1311 record_profiled_parameters_for_speculation();
1312 }
1313
1314 //------------------------------init_blocks------------------------------------
1315 // Initialize our parser map to contain the types/monitors at method entry.
1316 void Parse::init_blocks() {
1317 // Create the blocks.
1318 _block_count = flow()->block_count();
1319 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
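// Blocks live in resource memory and are indexed by reverse post-order (rpo).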
1320
1321 // Initialize the structs.
1322 for (int rpo = 0; rpo < block_count(); rpo++) {
1323 Block* block = rpo_at(rpo);
1324 new(block) Block(this, rpo);
1325 }
1326
1327 // Collect predecessor and successor information.
1328 for (int rpo = 0; rpo < block_count(); rpo++) {
1329 Block* block = rpo_at(rpo);
1330 block->init_graph(this);
1331 }
1720 //--------------------handle_missing_successor---------------------------------
1721 void Parse::handle_missing_successor(int target_bci) {
1722 #ifndef PRODUCT
1723 Block* b = block();
1724 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1725 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1726 #endif
1727 ShouldNotReachHere();
1728 }
1729
1730 //--------------------------merge_common---------------------------------------
1731 void Parse::merge_common(Parse::Block* target, int pnum) {
1732 if (TraceOptoParse) {
1733 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1734 }
1735
1736 // Zap extra stack slots to top
1737 assert(sp() == target->start_sp(), "");
1738 clean_stack(sp());
1739
1740 if (!target->is_merged()) { // No prior mapping at this bci
1741 if (TraceOptoParse) { tty->print(" with empty state"); }
1742
1743 // If this path is dead, do not bother capturing it as a merge.
1744 // It is "as if" we had 1 fewer predecessors from the beginning.
1745 if (stopped()) {
1746 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1747 return;
1748 }
1749
1750 // Make a region if we know there are multiple or unpredictable inputs.
1751 // (Also, if this is a plain fall-through, we might see another region,
1752 // which must not be allowed into this block's map.)
1753 if (pnum > PhiNode::Input // Known multiple inputs.
1754 || target->is_handler() // These have unpredictable inputs.
1755 || target->is_loop_head() // Known multiple inputs
1756 || control()->is_Region()) { // We must hide this guy.
1757
1758 int current_bci = bci();
1759 set_parse_bci(target->start()); // Set target bci
1774 record_for_igvn(r);
1775 // zap all inputs to null for debugging (done in Node(uint) constructor)
1776 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1777 r->init_req(pnum, control());
1778 set_control(r);
1779 target->copy_irreducible_status_to(r, jvms());
1780 set_parse_bci(current_bci); // Restore bci
1781 }
1782
1783 // Convert the existing Parser mapping into a mapping at this bci.
1784 store_state_to(target);
1785 assert(target->is_merged(), "do not come here twice");
1786
1787 } else { // Prior mapping at this bci
1788 if (TraceOptoParse) { tty->print(" with previous state"); }
1789 #ifdef ASSERT
1790 if (target->is_SEL_head()) {
1791 target->mark_merged_backedge(block());
1792 }
1793 #endif
1794 // We must not manufacture more phis if the target is already parsed.
1795 bool nophi = target->is_parsed();
1796
1797 SafePointNode* newin = map();// Hang on to incoming mapping
1798 Block* save_block = block(); // Hang on to incoming block;
1799 load_state_from(target); // Get prior mapping
1800
1801 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1802 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1803 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1804 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1805
1806 // Iterate over my current mapping and the old mapping.
1807 // Where different, insert Phi functions.
1808 // Use any existing Phi functions.
1809 assert(control()->is_Region(), "must be merging to a region");
1810 RegionNode* r = control()->as_Region();
1811
1812 // Compute where to merge into
1813 // Merge incoming control path
1814 r->init_req(pnum, newin->control());
1815
1816 if (pnum == 1) { // Last merge for this Region?
1817 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1818 Node* result = _gvn.transform(r);
1819 if (r != result && TraceOptoParse) {
1820 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1821 }
1822 }
1823 record_for_igvn(r);
1824 }
1825
1826 // Update all the non-control inputs to map:
1827 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1828 bool check_elide_phi = target->is_SEL_backedge(save_block);
1829 for (uint j = 1; j < newin->req(); j++) {
1830 Node* m = map()->in(j); // Current state of target.
1831 Node* n = newin->in(j); // Incoming change to target state.
1832 PhiNode* phi;
1833 if (m->is_Phi() && m->as_Phi()->region() == r)
1834 phi = m->as_Phi();
1835 else
1836 phi = nullptr;
1837 if (m != n) { // Different; must merge
1838 switch (j) {
1839 // Frame pointer and Return Address never changes
1840 case TypeFunc::FramePtr:// Drop m, use the original value
1841 case TypeFunc::ReturnAdr:
1842 break;
1843 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1844 assert(phi == nullptr, "the merge contains phis, not vice versa");
1845 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1846 continue;
1847 default: // All normal stuff
1848 if (phi == nullptr) {
1849 const JVMState* jvms = map()->jvms();
1850 if (EliminateNestedLocks &&
1851 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1852 // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1853 // Use old BoxLock node as merged box.
1854 assert(newin->jvms()->is_monitor_box(j), "sanity");
1855 // This assert also tests that nodes are BoxLock.
1856 assert(BoxLockNode::same_slot(n, m), "sanity");
1863 // Incremental Inlining before EA and Macro nodes elimination.
1864 //
1865 // Incremental Inlining is executed after IGVN optimizations
1866 // during which BoxLock can be marked as Coarsened.
1867 old_box->set_coarsened(); // Verifies state
1868 old_box->set_unbalanced();
1869 }
1870 C->gvn_replace_by(n, m);
1871 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1872 phi = ensure_phi(j, nophi);
1873 }
1874 }
1875 break;
1876 }
1877 }
1878 // At this point, n might be top if:
1879 // - there is no phi (because TypeFlow detected a conflict), or
1880 // - the corresponding control edge is top (a dead incoming path)
1881 // It is a bug if we create a phi which sees a garbage value on a live path.
1882
1883 if (phi != nullptr) {
1884 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1885 assert(phi->region() == r, "");
1886 phi->set_req(pnum, n); // Then add 'n' to the merge
1887 if (pnum == PhiNode::Input) {
1888 // Last merge for this Phi.
1889 // So far, Phis have had a reasonable type from ciTypeFlow.
1890 // Now _gvn will join that with the meet of current inputs.
1891 // BOTTOM is never permissible here, because pessimistically
1892 // Phis of pointers cannot lose the basic pointer type.
1893 DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
1894 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1895 map()->set_req(j, _gvn.transform(phi));
1896 DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
1897 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1898 record_for_igvn(phi);
1899 }
1900 }
1901 } // End of for all values to be merged
1902
1903 if (pnum == PhiNode::Input &&
1904 !r->in(0)) { // The occasional useless Region
1905 assert(control() == r, "");
1906 set_control(r->nonnull_req());
1907 }
1908
1909 map()->merge_replaced_nodes_with(newin);
1910
1911 // newin has been subsumed into the lazy merge, and is now dead.
1912 set_block(save_block);
1913
1914 stop(); // done with this guy, for now
1915 }
1916
1917 if (TraceOptoParse) {
1918 tty->print_cr(" on path %d", pnum);
1919 }
1920
1921 // Done with this parser state.
1922 assert(stopped(), "");
1923 }
1924
2036
2037 // Add new path to the region.
2038 uint pnum = r->req();
2039 r->add_req(nullptr);
2040
2041 for (uint i = 1; i < map->req(); i++) {
2042 Node* n = map->in(i);
2043 if (i == TypeFunc::Memory) {
2044 // Ensure a phi on all currently known memories.
2045 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2046 Node* phi = mms.memory();
2047 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2048 assert(phi->req() == pnum, "must be same size as region");
2049 phi->add_req(nullptr);
2050 }
2051 }
2052 } else {
2053 if (n->is_Phi() && n->as_Phi()->region() == r) {
2054 assert(n->req() == pnum, "must be same size as region");
2055 n->add_req(nullptr);
2056 }
2057 }
2058 }
2059
2060 return pnum;
2061 }
2062
2063 //------------------------------ensure_phi-------------------------------------
2064 // Turn the idx'th entry of the current map into a Phi
2065 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2066 SafePointNode* map = this->map();
2067 Node* region = map->control();
2068 assert(region->is_Region(), "");
2069
2070 Node* o = map->in(idx);
2071 assert(o != nullptr, "");
2072
2073 if (o == top()) return nullptr; // TOP always merges into TOP
2074
2075 if (o->is_Phi() && o->as_Phi()->region() == region) {
2076 return o->as_Phi();
2077 }
2078
2079 // Now use a Phi here for merging
2080 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2081 const JVMState* jvms = map->jvms();
2082 const Type* t = nullptr;
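// Derive the phi's initial type from the typeflow information for this JVMS
// slot: a local, a stack element, a monitor object, or one of the predefined
// inputs below TypeFunc::Parms.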
2083 if (jvms->is_loc(idx)) {
2084 t = block()->local_type_at(idx - jvms->locoff());
2085 } else if (jvms->is_stk(idx)) {
2086 t = block()->stack_type_at(idx - jvms->stkoff());
2087 } else if (jvms->is_mon(idx)) {
2088 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2089 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2090 } else if ((uint)idx < TypeFunc::Parms) {
2091 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2092 } else {
2093 assert(false, "no type information for this phi");
2094 }
2095
2096 // If the type falls to bottom, then this must be a local that
2097 // is mixing ints and oops or some such. Forcing it to top
2098 // makes it go dead.
2099 if (t == Type::BOTTOM) {
2100 map->set_req(idx, top());
2101 return nullptr;
2102 }
2103
2104 // Do not create phis for top either.
2105 // A top on a non-null control flow must be unused even after the phi.
2106 if (t == Type::TOP || t == Type::HALF) {
2107 map->set_req(idx, top());
2108 return nullptr;
2109 }
2110
2111 PhiNode* phi = PhiNode::make(region, o, t);
2112 gvn().set_type(phi, t);
2113 if (C->do_escape_analysis()) record_for_igvn(phi);
2114 map->set_req(idx, phi);
2115 return phi;
2116 }
2117
2118 //--------------------------ensure_memory_phi----------------------------------
2119 // Turn the idx'th slice of the current memory into a Phi
2120 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2121 MergeMemNode* mem = merged_memory();
2122 Node* region = control();
2123 assert(region->is_Region(), "");
2124
2125 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2126 assert(o != nullptr && o != top(), "");
2127
2128 PhiNode* phi;
2129 if (o->is_Phi() && o->as_Phi()->region() == region) {
2130 phi = o->as_Phi();
2131 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2132 // clone the shared base memory phi to make a new memory split
2133 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2134 const Type* t = phi->bottom_type();
2135 const TypePtr* adr_type = C->get_adr_type(idx);
2225 // Add check to deoptimize once holder klass is fully initialized.
2226 void Parse::clinit_deopt() {
2227 assert(C->has_method(), "only for normal compilations");
2228 assert(depth() == 1, "only for main compiled method");
2229 assert(is_normal_parse(), "no barrier needed on osr entry");
2230 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2231
2232 set_parse_bci(0);
2233
2234 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2235 guard_klass_being_initialized(holder);
2236 }
2237
2238 //------------------------------return_current---------------------------------
2239 // Append current _map to _exit_return
2240 void Parse::return_current(Node* value) {
2241 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2242 call_register_finalizer();
2243 }
2244
2245 if (StressReachabilityFences) {
2246 // Insert reachability fences for all oop arguments at the end of the method.
2247 for (uint i = 1; i < _stress_rf_hook->req(); i++) {
2248 Node* referent = _stress_rf_hook->in(i);
2249 assert(referent->bottom_type()->isa_oopptr(), "%s", Type::str(referent->bottom_type()));
2250 insert_reachability_fence(referent);
2251 }
2252 }
2253
2254 // Do not set_parse_bci, so that return goo is credited to the return insn.
2255 set_bci(InvocationEntryBci);
2256 if (method()->is_synchronized()) {
2257 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2258 }
2259 if (C->env()->dtrace_method_probes()) {
2260 make_dtrace_method_exit(method());
2261 }
2262 SafePointNode* exit_return = _exits.map();
2263 exit_return->in( TypeFunc::Control )->add_req( control() );
2264 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
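// Wire this return path into the exit state built in build_exits(): control
// and i/o were appended just above, and the loop below merges each memory slice.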
2265 Node *mem = exit_return->in( TypeFunc::Memory );
2266 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2267 if (mms.is_empty()) {
2268 // get a copy of the base memory, and patch just this one input
2269 const TypePtr* adr_type = mms.adr_type(C);
2270 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2271 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2272 gvn().set_type_bottom(phi);
2273 phi->del_req(phi->req()-1); // prepare to re-patch
2274 mms.set_memory(phi);
2275 }
2276 mms.memory()->add_req(mms.memory2());
2277 }
2278
2279 // frame pointer is always same, already captured
2280 if (value != nullptr) {
2281 // If returning oops to an interface-return, there is a silent free
2282 // cast from oop to interface allowed by the Verifier. Make it explicit
2283 // here.
2284 Node* phi = _exits.argument(0);
2285 phi->add_req(value);
2286 }
2287
2288 if (_first_return) {
2289 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2290 _first_return = false;
2291 } else {
2292 _exits.map()->merge_replaced_nodes_with(map());
2293 }
2294
2295 stop_and_kill_map(); // This CFG path dies here
2296 }
2297
2298
2299 //------------------------------add_safepoint----------------------------------
2300 void Parse::add_safepoint() {
2301 uint parms = TypeFunc::Parms+1;
2302
2303 // Clear out dead values from the debug info.
2304 kill_dead_locals();
2305
2306 // Clone the JVM State
2307 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
|
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciObjArrayKlass.hpp"
26 #include "ci/ciSignature.hpp"
27 #include "ci/ciTypeFlow.hpp"
28 #include "compiler/compileLog.hpp"
29 #include "interpreter/linkResolver.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "oops/method.hpp"
32 #include "opto/addnode.hpp"
33 #include "opto/c2compiler.hpp"
34 #include "opto/castnode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/idealGraphPrinter.hpp"
37 #include "opto/inlinetypenode.hpp"
38 #include "opto/locknode.hpp"
39 #include "opto/memnode.hpp"
40 #include "opto/opaquenode.hpp"
41 #include "opto/parse.hpp"
42 #include "opto/rootnode.hpp"
43 #include "opto/runtime.hpp"
44 #include "opto/type.hpp"
45 #include "runtime/arguments.hpp"
46 #include "runtime/handles.inline.hpp"
47 #include "runtime/safepointMechanism.hpp"
48 #include "runtime/sharedRuntime.hpp"
49 #include "utilities/bitMap.inline.hpp"
50 #include "utilities/copy.hpp"
51
52 // Static array so we can figure out which bytecodes stop us from compiling
53 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
54 // and eventually should be encapsulated in a proper class (gri 8/18/98).
55
56 #ifndef PRODUCT
57 uint nodes_created = 0;
58 uint methods_parsed = 0;
59 uint methods_seen = 0;
60 uint blocks_parsed = 0;
61 uint blocks_seen = 0;
62
63 uint explicit_null_checks_inserted = 0;
64 uint explicit_null_checks_elided = 0;
65 uint all_null_checks_found = 0;
90 }
91 if (all_null_checks_found) {
92 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
93 (100*implicit_null_checks)/all_null_checks_found);
94 }
95 if (SharedRuntime::_implicit_null_throws) {
96 tty->print_cr("%u implicit null exceptions at runtime",
97 SharedRuntime::_implicit_null_throws);
98 }
99
100 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
101 BytecodeParseHistogram::print();
102 }
103 }
104 #endif
105
106 //------------------------------ON STACK REPLACEMENT---------------------------
107
108 // Construct a node which can be used to get incoming state for
109 // on stack replacement.
110 Node* Parse::fetch_interpreter_state(int index,
111 const Type* type,
112 Node* local_addrs) {
113 BasicType bt = type->basic_type();
114 if (type == TypePtr::NULL_PTR) {
115 // Ptr types are mixed together with T_ADDRESS but nullptr is
116 // really for T_OBJECT types so correct it.
117 bt = T_OBJECT;
118 }
119 Node* mem = memory(Compile::AliasIdxRaw);
120 Node* adr = off_heap_plus_addr(local_addrs, -index*wordSize);
121 Node* ctl = control();
122
123 // Very similar to LoadNode::make, except we handle un-aligned longs and
124 // doubles on Sparc. Intel can handle them just fine directly.
125 Node *l = nullptr;
126 switch (bt) { // Signature is flattened
127 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
128 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
129 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
130 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
131 case T_LONG:
132 case T_DOUBLE: {
133 // Since arguments are in reverse order, the argument address 'adr'
134 // refers to the back half of the long/double. Recompute adr.
135 adr = off_heap_plus_addr(local_addrs, -(index+1)*wordSize);
136 if (Matcher::misaligned_doubles_ok) {
137 l = (bt == T_DOUBLE)
138 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
139 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
140 } else {
141 l = (bt == T_DOUBLE)
142 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
143 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
144 }
145 break;
146 }
147 default: ShouldNotReachHere();
148 }
149 return _gvn.transform(l);
150 }
151
152 // Helper routine to prevent the interpreter from handing
153 // unexpected typestate to an OSR method.
154 // The Node l is a value newly dug out of the interpreter frame.
155 // The type is the type predicted by ciTypeFlow. Note that it is
156 // not a general type, but can only come from Type::get_typeflow_type.
157 // The safepoint is a map which will feed an uncommon trap.
158 Node* Parse::check_interpreter_type(Node* l, ciType* ci_type, SafePointNode* &bad_type_exit) {
159 ciType* unwrapped_ci_type = ci_type->unwrap();
160
161 // TypeFlow may assert null-ness if a type appears unloaded.
162 if (int(unwrapped_ci_type->basic_type()) == int(ciTypeFlow::StateVector::T_NULL) ||
163 !unwrapped_ci_type->is_loaded()) {
164 // Value must be null, not a real oop.
165 Node* chk = _gvn.transform(new CmpPNode(l, null()));
166 Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
167 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
168 set_control(_gvn.transform(new IfTrueNode(iff)));
169 Node* bad_type = _gvn.transform(new IfFalseNode(iff));
170 bad_type_exit->control()->add_req(bad_type);
171 l = null();
172 }
173
174 // Typeflow can also cut off paths from the CFG, based on
175 // types which appear unloaded, or call sites which appear unlinked.
176 // When paths are cut off, values at later merge points can rise
177 // toward more specific classes. Make sure these specific classes
178 // are still in effect.
179 if (unwrapped_ci_type->is_klass()) {
180 // TypeFlow asserted a specific object type. Value must have that type.
181 assert(_gvn.type(l) == TypePtr::NULL_PTR || _gvn.type(l)->isa_oopptr(), "must be an oop");
182 if (ci_type->is_null_free()) {
183 // Check inline types for null here to prevent checkcast from adding an
184 // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
185 Node* bad_type_ctrl = nullptr;
186 l = null_check_oop(l, &bad_type_ctrl);
187 bad_type_exit->control()->add_req(bad_type_ctrl);
188 }
189
190 const TypeKlassPtr* klass_type = TypeKlassPtr::make(unwrapped_ci_type->as_klass(), Type::ignore_interfaces);
191 klass_type = klass_type->try_improve();
192 bool is_early_larval = ci_type->is_early_larval();
193 Node* bad_type_ctrl = nullptr;
194 l = gen_checkcast(l, makecon(klass_type), &bad_type_ctrl, nullptr, false, is_early_larval);
195 bad_type_exit->control()->add_req(bad_type_ctrl);
196 } else {
197 const Type* type = Type::get_typeflow_type(ci_type);
198 assert(_gvn.type(l)->higher_equal(type), "must match");
199 }
200
201 return l;
202 }
203
204 // Helper routine which sets up elements of the initial parser map when
205 // performing a parse for on stack replacement. Add values into map.
206 // The only parameter contains the address of the interpreter arguments.
207 void Parse::load_interpreter_state(Node* osr_buf) {
208 int index;
209 int max_locals = jvms()->loc_size();
210 int max_stack = jvms()->stk_size();
211
212 // Mismatch between method and jvms can occur since map briefly held
213 // an OSR entry state (which takes up one RawPtr word).
214 assert(max_locals == method()->max_locals(), "sanity");
215 assert(max_stack >= method()->max_stack(), "sanity");
216 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
217 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
218
219 // Find the start block.
220 Block* osr_block = start_block();
221 assert(osr_block->start() == osr_bci(), "sanity");
222
223 // Set initial BCI.
224 set_parse_bci(osr_block->start());
225
226 // Set initial stack depth.
227 set_sp(osr_block->start_sp());
228
229 // Check bailouts. We currently do not perform on stack replacement
230 // of loops in catch blocks or loops which branch with a non-empty stack.
231 if (sp() != 0) {
246 for (index = 0; index < mcnt; index++) {
247 // Make a BoxLockNode for the monitor.
248 BoxLockNode* osr_box = new BoxLockNode(next_monitor());
249 // Check for bailout after new BoxLockNode
250 if (failing()) { return; }
251
252 // This OSR locking region is unbalanced because it does not have a Lock node:
253 // the locking was done in the interpreter.
254 // This is similar to the Coarsened case, when a Lock node is eliminated
255 // and, as a result, the region is marked as Unbalanced.
256
257 // Emulate Coarsened state transition from Regular to Unbalanced.
258 osr_box->set_coarsened();
259 osr_box->set_unbalanced();
260
261 Node* box = _gvn.transform(osr_box);
262
263 // Displaced headers and locked objects are interleaved in the
264 // temp OSR buffer. We only copy the locked objects out here.
265 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
266 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr);
267 // Try and copy the displaced header to the BoxNode
268 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr);
269
270 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
271
272 // Build a bogus FastLockNode (no code will be generated) and push the
273 // monitor into our debug info.
274 const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
275 map()->push_monitor(flock);
276
277 // If the lock is our method synchronization lock, tuck it away in
278 // _synch_lock for return and rethrow exit paths.
279 if (index == 0 && method()->is_synchronized()) {
280 _synch_lock = flock;
281 }
282 }
283
284 // Use the raw liveness computation to make sure that unexpected
285 // values don't propagate into the OSR frame.
286 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
287 if (!live_locals.is_valid()) {
288 // Degenerate or breakpointed method.
316 if (C->log() != nullptr) {
317 C->log()->elem("OSR_mismatch local_index='%d'",index);
318 }
319 set_local(index, null());
320 // and ignore it for the loads
321 continue;
322 }
323 }
324
325 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
326 if (type == Type::TOP || type == Type::HALF) {
327 continue;
328 }
329 // If the type falls to bottom, then this must be a local that
330 // is mixing ints and oops or some such. Forcing it to top
331 // makes it go dead.
332 if (type == Type::BOTTOM) {
333 continue;
334 }
335 // Construct code to access the appropriate local.
336 Node* value = fetch_interpreter_state(index, type, locals_addr);
337 set_local(index, value);
338 }
339
340 // Extract the needed stack entries from the interpreter frame.
341 for (index = 0; index < sp(); index++) {
342 const Type *type = osr_block->stack_type_at(index);
343 if (type != Type::TOP) {
344 // Currently the compiler bails out when attempting to on stack replace
345 // at a bci with a non-empty stack. We should not reach here.
346 ShouldNotReachHere();
347 }
348 }
349
350 // End the OSR migration
351 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
352 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
353 "OSR_migration_end", TypeRawPtr::BOTTOM,
354 osr_buf);
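// Note: the OSR_migration_end leaf call above also releases the temporary
// migration buffer (osr_buf) that the interpreter frame was copied into.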
355
356 // Now that the interpreter state is loaded, make sure it will match
357 // at execution time what the compiler is expecting now:
358 SafePointNode* bad_type_exit = clone_map();
359 bad_type_exit->set_control(new RegionNode(1));
360
361 assert(osr_block->flow()->jsrs()->size() == 0, "should be no jsrs live at osr point");
362 for (index = 0; index < max_locals; index++) {
363 if (stopped()) {
364 break;
365 }
366 const Type *type = osr_block->local_type_at(index);
367 Node* l = local(index);
368 if (l->is_top()) {
369 continue;
370 }
371
372 ciType* ci_type = osr_block->flow()->local_type_at(index);
373 if (ci_type->unwrap()->is_klass() && !live_oops.at(index)) {
374 // skip type check for dead oops
375 continue;
376 }
377
378 if (ci_type->is_return_address()) {
379 // In our current system it's illegal for jsr addresses to be
380 // live into an OSR entry point because the compiler performs
381 // inlining of jsrs. ciTypeFlow has a bailout that detects this
382 // case and aborts the compile if addresses are live into an OSR
383 // entry point. Because of that we can assume that any address
384 // locals at the OSR entry point are dead. Method liveness
385 // isn't precise enough to figure out that they are dead in all
386 // cases, so simply skip checking address locals altogether.
387 // Any type check is guaranteed to fail since the
388 // interpreter type is the result of a load which might have any
389 // value and the expected type is a constant.
390 continue;
391 }
392 if (StressReachabilityFences && type->isa_oopptr() != nullptr) {
393 // Keep all oop locals alive until the method returns as if there are
394 // reachability fences for them at the end of the method.
395 Node* loc = local(index);
396 if (loc->bottom_type() != TypePtr::NULL_PTR) {
397 assert(loc->bottom_type()->isa_oopptr() != nullptr, "%s", Type::str(loc->bottom_type()));
398 _stress_rf_hook->add_req(loc);
399 }
400 }
401 set_local(index, check_interpreter_type(l, ci_type, bad_type_exit));
402 }
403
404 for (index = 0; index < sp(); index++) {
405 if (stopped()) {
406 break;
407 }
408 const Type *type = osr_block->stack_type_at(index);
409 if (StressReachabilityFences && type->isa_oopptr() != nullptr) {
410 // Keep all oops on stack alive until the method returns as if there are
411 // reachability fences for them at the end of the method.
412 Node* stk = stack(index);
413 if (stk->bottom_type() != TypePtr::NULL_PTR) {
414 assert(stk->bottom_type()->isa_oopptr() != nullptr, "%s", Type::str(stk->bottom_type()));
415 _stress_rf_hook->add_req(stk);
416 }
417 }
418
419 Node* l = stack(index);
420 if (l->is_top()) {
421 continue;
422 }
423
424 ciType* ci_type = osr_block->flow()->stack_type_at(index);
425 set_stack(index, check_interpreter_type(l, ci_type, bad_type_exit));
426 }
427
428 if (bad_type_exit->control()->req() > 1) {
429 // Build an uncommon trap here, if any inputs can be unexpected.
430 bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
431 record_for_igvn(bad_type_exit->control());
432 SafePointNode* types_are_good = map();
433 set_map(bad_type_exit);
434 // The unexpected type happens because a new edge is active
435 // in the CFG, which typeflow had previously ignored.
436 // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
437 // This x will be typed as Integer if notReached is not yet linked.
438 // It could also happen due to a problem in ciTypeFlow analysis.
439 uncommon_trap(Deoptimization::Reason_constraint,
440 Deoptimization::Action_reinterpret);
441 set_map(types_are_good);
442 }
443 }
444
445 //------------------------------Parse------------------------------------------
446 // Main parser constructor.
447 Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
448 : _exits(caller)
449 {
450 // Init some variables
451 _caller = caller;
452 _method = parse_method;
453 _expected_uses = expected_uses;
454 _depth = 1 + (caller->has_method() ? caller->depth() : 0);
455 _wrote_non_strict_final = false;
456 _wrote_volatile = false;
457 _wrote_stable = false;
458 _wrote_fields = false;
459 _alloc_with_final_or_stable = nullptr;
460 _stress_rf_hook = (StressReachabilityFences ? new Node(1) : nullptr);
461 _block = nullptr;
462 _first_return = true;
463 _replaced_nodes_for_exceptions = false;
464 _new_idx = C->unique();
465 DEBUG_ONLY(_entry_bci = UnknownBci);
466 DEBUG_ONLY(_block_count = -1);
467 DEBUG_ONLY(_blocks = (Block*)-1);
468 #ifndef PRODUCT
469 if (PrintCompilation || PrintOpto) {
470 // Make sure I have an inline tree, so I can print messages about it.
471 InlineTree::find_subtree_from_root(C->ilt(), caller, parse_method);
472 }
473 _max_switch_depth = 0;
474 _est_switch_depth = 0;
475 #endif
541 }
542
543 assert(InlineTree::check_can_parse(method()) == nullptr, "Can not parse this method, cutout earlier");
544 assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
545
546 // Always register dependence if JVMTI is enabled, because
547 // either breakpoint setting or hotswapping of methods may
548 // cause deoptimization.
549 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
550 C->dependencies()->assert_evol_method(method());
551 }
552
553 NOT_PRODUCT(methods_seen++);
554
555 // Do some special top-level things.
556 if (depth() == 1 && C->is_osr_compilation()) {
557 _tf = C->tf(); // the OSR entry type is different
558 _entry_bci = C->entry_bci();
559 _flow = method()->get_osr_flow_analysis(osr_bci());
560 } else {
561 _tf = TypeFunc::make(method(), false);
562 _entry_bci = InvocationEntryBci;
563 _flow = method()->get_flow_analysis();
564 }
565
566 if (_flow->failing()) {
567 assert(false, "type flow analysis failed during parsing");
568 C->record_method_not_compilable(_flow->failure_reason());
569 #ifndef PRODUCT
570 if (PrintOpto && (Verbose || WizardMode)) {
571 if (is_osr_parse()) {
572 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
573 } else {
574 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
575 }
576 if (Verbose) {
577 method()->print();
578 method()->print_codes();
579 _flow->print();
580 }
581 }
828 void Parse::build_exits() {
829 // make a clone of caller to prevent sharing of side-effects
830 _exits.set_map(_exits.clone_map());
831 _exits.clean_stack(_exits.sp());
832 _exits.sync_jvms();
833
834 RegionNode* region = new RegionNode(1);
835 record_for_igvn(region);
836 gvn().set_type_bottom(region);
837 _exits.set_control(region);
838
839 // Note: iophi and memphi are not transformed until do_exits.
840 Node* iophi = new PhiNode(region, Type::ABIO);
841 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
842 gvn().set_type_bottom(iophi);
843 gvn().set_type_bottom(memphi);
844 _exits.set_i_o(iophi);
845 _exits.set_all_memory(memphi);
846
847 // Add a return value to the exit state. (Do not push it yet.)
848 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
849 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
850 if (ret_type->isa_int()) {
851 BasicType ret_bt = method()->return_type()->basic_type();
852 if (ret_bt == T_BOOLEAN ||
853 ret_bt == T_CHAR ||
854 ret_bt == T_BYTE ||
855 ret_bt == T_SHORT) {
856 ret_type = TypeInt::INT;
857 }
858 }
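// Note: sub-word integral returns are widened to a full INT phi here; the
// value is masked back to the declared boolean/char/byte/short width later,
// in do_exits(), via mask_int_value() once the phi has been transformed.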
859
860 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
861 // becomes loaded during the subsequent parsing, the loaded and unloaded
862 // types will not join when we transform and push in do_exits().
863 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
864 if (ret_oop_type && !ret_oop_type->is_loaded()) {
865 ret_type = TypeOopPtr::BOTTOM;
866 }
867 int ret_size = type2size[ret_type->basic_type()];
868 Node* ret_phi = new PhiNode(region, ret_type);
869 gvn().set_type_bottom(ret_phi);
870 _exits.ensure_stack(ret_size);
871 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
872 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
873 _exits.set_argument(0, ret_phi); // here is where the parser finds it
874 // Note: ret_phi is not yet pushed, until do_exits.
875 }
876 }
877
878 //----------------------------build_start_state-------------------------------
879 // Construct a state which contains only the incoming arguments from an
880 // unknown caller. The method & bci will be null & InvocationEntryBci.
881 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
882 int arg_size = tf->domain_sig()->cnt();
883 int max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
884 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
885 SafePointNode* map = new SafePointNode(max_size, jvms);
886 jvms->set_map(map);
887 record_for_igvn(map);
888 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
889 Node_Notes* old_nn = default_node_notes();
890 if (old_nn != nullptr && has_method()) {
891 Node_Notes* entry_nn = old_nn->clone(this);
892 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
893 entry_jvms->set_offsets(0);
894 entry_jvms->set_bci(entry_bci());
895 entry_nn->set_jvms(entry_jvms);
896 set_default_node_notes(entry_nn);
897 }
898 PhaseGVN& gvn = *initial_gvn();
899 uint i = 0;
900 int arg_num = 0;
901 for (uint j = 0; i < (uint)arg_size; i++) {
902 const Type* t = tf->domain_sig()->field_at(i);
903 Node* parm = nullptr;
904 if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
905 // Inline type arguments are not passed by reference: we get an argument per
906 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
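// For example, a value class with fields (int x, int y) shows up here as (at
// least) one incoming argument per declared field rather than as a single oop.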
907 GraphKit kit(jvms, &gvn);
908 kit.set_control(map->control());
909 Node* old_mem = map->memory();
910 // Use immutable memory for inline type loads and restore it below
911 kit.set_all_memory(C->immutable_memory());
912 parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
913 map->set_control(kit.control());
914 map->set_memory(old_mem);
915 } else {
916 parm = gvn.transform(new ParmNode(start, j++));
917 }
918 map->init_req(i, parm);
919 // Record all these guys for later GVN.
920 record_for_igvn(parm);
921 if (i >= TypeFunc::Parms && t != Type::HALF) {
922 arg_num++;
923 }
924 }
925 for (; i < map->req(); i++) {
926 map->init_req(i, top());
927 }
928 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
929 set_default_node_notes(old_nn);
930 return jvms;
931 }
932
933 //-----------------------------make_node_notes---------------------------------
934 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
935 if (caller_nn == nullptr) return nullptr;
936 Node_Notes* nn = caller_nn->clone(C);
937 JVMState* caller_jvms = nn->jvms();
938 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
939 jvms->set_offsets(0);
940 jvms->set_bci(_entry_bci);
941 nn->set_jvms(jvms);
942 return nn;
943 }
944
945
946 //--------------------------return_values--------------------------------------
947 void Compile::return_values(JVMState* jvms) {
948 GraphKit kit(jvms);
949 Node* ret = new ReturnNode(TypeFunc::Parms,
950 kit.control(),
951 kit.i_o(),
952 kit.reset_memory(),
953 kit.frameptr(),
954 kit.returnadr());
955 // Add zero or 1 return values
956 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
957 if (ret_size > 0) {
958 kit.inc_sp(-ret_size); // pop the return value(s)
959 kit.sync_jvms();
960 Node* res = kit.argument(0);
961 if (tf()->returns_inline_type_as_fields()) {
962 // Multiple return values (inline type fields): add as many edges
963 // to the Return node as returned values.
964 InlineTypeNode* vt = res->as_InlineType();
965 ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
966 if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
967 ret->init_req(TypeFunc::Parms, vt);
968 } else {
969 // Return the tagged klass pointer to signal scalarization to the caller
970 Node* tagged_klass = vt->tagged_klass(kit.gvn());
971 // Return null if the inline type is null (null marker field is not set)
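// The (marker << 63) >> 63 sequence below widens the int null marker (0 or 1)
// into a 64-bit mask of all zeros or all ones, so the AND either clears the
// tagged klass pointer (null value object) or leaves it unchanged.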
972 Node* conv = kit.gvn().transform(new ConvI2LNode(vt->get_null_marker()));
973 Node* shl = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
974 Node* shr = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
975 tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
976 ret->init_req(TypeFunc::Parms, tagged_klass);
977 }
978 uint idx = TypeFunc::Parms + 1;
979 vt->pass_fields(&kit, ret, idx, false, false);
980 } else {
981 ret->add_req(res);
982 // Note: The second dummy edge is not needed by a ReturnNode.
983 }
984 }
985 // bind it to root
986 root()->add_req(ret);
987 record_for_igvn(ret);
988 initial_gvn()->transform(ret);
989 }
990
991 //------------------------rethrow_exceptions-----------------------------------
992 // Bind all exception states in the list into a single RethrowNode.
993 void Compile::rethrow_exceptions(JVMState* jvms) {
994 GraphKit kit(jvms);
995 if (!kit.has_exceptions()) return; // nothing to generate
996 // Load my combined exception state into the kit, with all phis transformed:
997 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
998 Node* ex_oop = kit.use_exception_state(ex_map);
999 RethrowNode* exit = new RethrowNode(kit.control(),
1000 kit.i_o(), kit.reset_memory(),
1001 kit.frameptr(), kit.returnadr(),
1002 // like a return but with exception input
1003 ex_oop);
1087 // to complete, we force all writes to complete.
1088 //
1089 // 2. The experimental VM option AlwaysSafeConstructors is used to force the
1090 // barrier if any field was written out in the constructor.
1091 //
1092 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1093 // support_IRIW_for_not_multiple_copy_atomic_cpu causes MemBarVolatile to be
1094 // emitted before a volatile load instead of after a volatile store, so
1095 // there is no barrier after the store.
1096 // We want to guarantee the same behavior as on platforms with total store
1097 // order, although this is not required by the Java memory model.
1098 // In this case, we want to enforce visibility of volatile field
1099 // initializations which are performed in constructors.
1100 // So as with finals, we add a barrier here.
1101 //
1102 // "All bets are off" unless the first publication occurs after a
1103 // normal return from the constructor. We do not attempt to detect
1104 // such unusual early publications. But no barrier is needed on
1105 // exceptional returns, since they cannot publish normally.
1106 //
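// Illustrative example (not from this file): for
//   class Holder { final int x; Holder() { x = 42; } }
// published racily via a shared field 'h = new Holder();', the barrier emitted
// below keeps the store to 'x' ordered before the publishing store to 'h', so a
// racing reader that observes the new Holder through 'h' is guaranteed, by final
// field semantics, not to see the default value of x.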
1107 if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
1108 (wrote_non_strict_final() || wrote_stable() ||
1109 (AlwaysSafeConstructors && wrote_fields()) ||
1110 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1111 Node* recorded_alloc = alloc_with_final_or_stable();
1112 _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1113 recorded_alloc);
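// A StoreStore barrier is sufficient here because the publication of the new
// object is itself a store; MemBarRelease (used when UseStoreStoreForCtor is
// off) additionally orders prior loads before the publishing store.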
1114
1115 // If the memory barrier is created for final field writes
1116 // and the allocation node does not escape the initializer method,
1117 // then the barrier introduced by the allocation node can be removed.
1118 if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1119 AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1120 alloc->compute_MemBar_redundancy(method());
1121 }
1122 if (PrintOpto && (Verbose || WizardMode)) {
1123 method()->print_name();
1124 tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1125 }
1126 }
1127
1128 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1129 // transform each slice of the original memphi:
1130 mms.set_memory(_gvn.transform(mms.memory()));
1131 }
1132 // Clean up input MergeMems created by transforming the slices
1133 _gvn.transform(_exits.merged_memory());
1134
1135 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1136 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1137 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1138 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1139 // If the type we set for the ret_phi in build_exits() is too optimistic and
1140 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1141 // loading. It could also be due to an error, so mark this method as not compilable because
1142 // otherwise this could lead to an infinite compile loop.
1143 // In any case, this code path is rarely (and never in my testing) reached.
1144 C->record_method_not_compilable("Can't determine return type.");
1145 return;
1146 }
1147 if (ret_type->isa_int()) {
1148 BasicType ret_bt = method()->return_type()->basic_type();
1149 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1150 }
1151 _exits.push_node(ret_type->basic_type(), ret_phi);
1152 }
1153
1154 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1155
1156 // Unlock along the exceptional paths.
1210
1211 //-----------------------------create_entry_map-------------------------------
1212 // Initialize our parser map to contain the types at method entry.
1213 // For OSR, the map contains a single RawPtr parameter.
1214 // Initial monitor locking for sync. methods is performed by do_method_entry.
1215 SafePointNode* Parse::create_entry_map() {
1216 // Check for really stupid bail-out cases.
1217 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1218 if (len >= 32760) {
1219 // Bailout expected, this is a very rare edge case.
1220 C->record_method_not_compilable("too many local variables");
1221 return nullptr;
1222 }
1223
1224 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1225 _caller->map()->delete_replaced_nodes();
1226
1227 // If this is an inlined method, we may have to do a receiver null check.
1228 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1229 GraphKit kit(_caller);
1230 Node* receiver = kit.argument(0);
1231 Node* null_free = kit.null_check_receiver_before_call(method());
1232 _caller = kit.transfer_exceptions_into_jvms();
1233
1234 if (kit.stopped()) {
1235 _exits.add_exception_states_from(_caller);
1236 _exits.set_jvms(_caller);
1237 return nullptr;
1238 }
1239 }
1240
1241 assert(method() != nullptr, "parser must have a method");
1242
1243 // Create an initial safepoint to hold JVM state during parsing
1244 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1245 set_map(new SafePointNode(len, jvms));
1246
1247 // Capture receiver info for compiled lambda forms.
1248 if (method()->is_compiled_lambda_form()) {
1249 ciInstance* recv_info = _caller->compute_receiver_info(method());
1250 jvms->set_receiver_info(recv_info);
1251 }
1252
1253 jvms->set_map(map());
1257 SafePointNode* inmap = _caller->map();
1258 assert(inmap != nullptr, "must have inmap");
1259 // In case of null check on receiver above
1260 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1261
1262 uint i;
1263
1264 // Pass thru the predefined input parameters.
1265 for (i = 0; i < TypeFunc::Parms; i++) {
1266 map()->init_req(i, inmap->in(i));
1267 }
1268
1269 if (depth() == 1) {
1270 assert(map()->memory()->Opcode() == Op_Parm, "");
1271 // Insert the memory aliasing node
1272 set_all_memory(reset_memory());
1273 }
1274 assert(merged_memory(), "");
1275
1276 // Now add the locals which are initially bound to arguments:
1277 uint arg_size = tf()->domain_sig()->cnt();
1278 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1279 for (i = TypeFunc::Parms; i < arg_size; i++) {
1280 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1281 }
1282
1283 // Clear out the rest of the map (locals and stack)
1284 for (i = arg_size; i < len; i++) {
1285 map()->init_req(i, top());
1286 }
1287
1288 SafePointNode* entry_map = stop();
1289 return entry_map;
1290 }
1291
1292 //-----------------------is_auto_boxed_primitive------------------------------
1293 // Helper method to detect auto-boxed primitives (result of valueOf() call).
1294 static bool is_auto_boxed_primitive(Node* n) {
1295 return (n->is_Proj() && n->as_Proj()->_con == TypeFunc::Parms &&
1296 n->in(0)->is_CallJava() &&
1297 n->in(0)->as_CallJava()->method()->is_boxing_method());
1298 }
1299
1300 //-----------------------------do_method_entry--------------------------------
1301 // Emit any code needed in the pseudo-block before BCI zero.
1302 // The main thing to do is lock the receiver of a synchronized method.
1303 void Parse::do_method_entry() {
1304 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1305 set_sp(0); // Java Stack Pointer
1306
1307 NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1308
1309 // Check if we need a membar at the beginning of the java.lang.Object
1310 // constructor to satisfy the memory model for strict fields.
1311 if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1312 Node* receiver_obj = local(0);
1313 const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1314 // If there's no exact type, check if the declared type has no implementors and add a dependency
1315 const TypeKlassPtr* klass_ptr = receiver_type->as_klass_type(/* try_for_exact= */ true);
1316 ciType* klass = klass_ptr->klass_is_exact() ? klass_ptr->exact_klass() : nullptr;
1317 if (klass != nullptr && klass->is_instance_klass()) {
1318 // Exact receiver type, check if there is a strict field
1319 ciInstanceKlass* holder = klass->as_instance_klass();
1320 for (int i = 0; i < holder->nof_nonstatic_fields(); i++) {
1321 ciField* field = holder->nonstatic_field_at(i);
1322 if (field->is_strict()) {
1323 // Found a strict field, a membar is needed
1324 AllocateNode* alloc = AllocateNode::Ideal_allocation(receiver_obj);
1325 insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease, receiver_obj);
1326 if (DoEscapeAnalysis && (alloc != nullptr)) {
1327 alloc->compute_MemBar_redundancy(method());
1328 }
1329 break;
1330 }
1331 }
1332 } else if (klass == nullptr) {
1333 // We can't statically determine the type of the receiver and therefore need
1334 // to put a membar here because it could have a strict field.
1335 insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease);
1336 }
1337 }
1338
1339 if (C->env()->dtrace_method_probes()) {
1340 make_dtrace_method_entry(method());
1341 }
1342
1343 if (StressReachabilityFences) {
1344 // Keep all oop arguments alive until the method returns as if there are
1345 // reachability fences for them at the end of the method.
1346 int max_locals = jvms()->loc_size();
1347 for (int idx = 0; idx < max_locals; idx++) {
1348 Node* loc = local(idx);
1349 if (loc->bottom_type()->isa_oopptr() != nullptr &&
1350 !is_auto_boxed_primitive(loc)) { // ignore auto-boxed primitives
1351 _stress_rf_hook->add_req(loc);
1352 }
1353 }
1354 }
1355
1356 #ifdef ASSERT
1357 // Narrow receiver type when it is too broad for the method being parsed.
1358 if (!method()->is_static()) {
1381
1382 // If the method is synchronized, we need to construct a lock node, attach
1383 // it to the Start node, and pin it there.
1384 if (method()->is_synchronized()) {
1385 // Insert a FastLockNode right after the Start which takes as arguments
1386 // the current thread pointer, the "this" pointer & the address of the
1387 // stack slot pair used for the lock. The "this" pointer is a projection
1388 // off the start node, but the locking spot has to be constructed by
1389 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1390 // becomes the second argument to the FastLockNode call. The
1391 // FastLockNode becomes the new control parent to pin it to the start.
1392
1393 // Setup Object Pointer
1394 Node *lock_obj = nullptr;
1395 if (method()->is_static()) {
1396 ciInstance* mirror = _method->holder()->java_mirror();
1397 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1398 lock_obj = makecon(t_lock);
1399 } else { // Else pass the "this" pointer,
1400 lock_obj = local(0); // which is Parm0 from StartNode
1401 assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1402 }
1403 // Clear out dead values from the debug info.
1404 kill_dead_locals();
1405 // Build the FastLockNode
1406 _synch_lock = shared_lock(lock_obj);
1407 // Check for bailout in shared_lock
1408 if (failing()) { return; }
1409 }
1410
1411 // Feed profiling data for parameters to the type system so it can
1412 // propagate it as speculative types
1413 record_profiled_parameters_for_speculation();
1414
1415 // More argument handling
1416 int arg_size = method()->arg_size();
1417 for (int i = 0; i < arg_size; i++) {
1418 Node* parm = local(i);
1419 const Type* t = _gvn.type(parm);
1420 if (t->is_inlinetypeptr()) {
1421 // If the parameter is a value object, try to scalarize it if we know that it is unrestricted (not early larval)
1422 // Parameters are non-larval except the receiver of a constructor, which must be an early larval object.
1423 if (!(i == 0 && method()->receiver_maybe_larval())) {
1424 // Create InlineTypeNode from the oop and replace the parameter
1425 Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass());
1426 replace_in_map(parm, vt);
1427 }
1428 } else if (UseTypeSpeculation && (i == (arg_size - 1)) && depth() == 1 && method()->has_vararg() && t->isa_aryptr()) {
1429 // Speculate on varargs Object array being the default array refined type. The assumption is
1430 // that a vararg method test(Object... o) is often called as test(o1, o2, o3). javac will
1431 // translate the call so that the caller will create a new default array of Object, put o1,
1432 // o2, o3 into the newly created array, then invoke the method test. This only makes sense if
1433 // the method we are parsing is the top-level method of the compilation unit. Otherwise, if
1434 // it is truly called according to our assumption, we must know the exact type of the
1435 // argument because the allocation happens inside the compilation unit.
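// For example, a call site written as test(o1, o2, o3) against
// 'void test(Object... o)' is compiled by javac into
// test(new Object[]{o1, o2, o3}), so the incoming array has the default
// Object[] type allocated at the call site.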
1436 const TypePtr* spec_type = (t->speculative() != nullptr) ? t->speculative() : t->remove_speculative()->is_aryptr();
1437 ciSignature* method_signature = method()->signature();
1438 ciType* parm_citype = method_signature->type_at(method_signature->count() - 1);
1439 if (!parm_citype->is_obj_array_klass()) {
1440 continue;
1441 }
1442
1443 ciObjArrayKlass* spec_citype = ciObjArrayKlass::make(parm_citype->as_obj_array_klass()->element_klass(), true);
1444 const Type* improved_spec_type = TypeKlassPtr::make(spec_citype, Type::trust_interfaces)->as_instance_type();
1445 improved_spec_type = improved_spec_type->join(spec_type)->join(TypePtr::NOTNULL);
1446 if (improved_spec_type->empty()) {
1447 continue;
1448 }
1449
1450 const TypePtr* improved_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, improved_spec_type->is_ptr());
1451 improved_type = improved_type->join_speculative(t)->is_ptr();
1452 if (improved_type != t) {
1453 Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, improved_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing));
1454 replace_in_map(parm, cast);
1455 }
1456 }
1457 }
1458 }
1459
1460 //------------------------------init_blocks------------------------------------
1461 // Create and initialize the parser's basic-block structures from the type-flow analysis.
1462 void Parse::init_blocks() {
1463 // Create the blocks.
1464 _block_count = flow()->block_count();
1465 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1466
1467 // Initialize the structs.
1468 for (int rpo = 0; rpo < block_count(); rpo++) {
1469 Block* block = rpo_at(rpo);
1470 new(block) Block(this, rpo);
1471 }
1472
1473 // Collect predecessor and successor information.
1474 for (int rpo = 0; rpo < block_count(); rpo++) {
1475 Block* block = rpo_at(rpo);
1476 block->init_graph(this);
1477 }
1866 //--------------------handle_missing_successor---------------------------------
1867 void Parse::handle_missing_successor(int target_bci) {
1868 #ifndef PRODUCT
1869 Block* b = block();
1870 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1871 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1872 #endif
1873 ShouldNotReachHere();
1874 }
1875
1876 //--------------------------merge_common---------------------------------------
1877 void Parse::merge_common(Parse::Block* target, int pnum) {
1878 if (TraceOptoParse) {
1879 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1880 }
1881
1882 // Zap extra stack slots to top
1883 assert(sp() == target->start_sp(), "");
1884 clean_stack(sp());
1885
1886 // Check for merge conflicts involving inline types
1887 JVMState* old_jvms = map()->jvms();
1888 int old_bci = bci();
1889 JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1890 tmp_jvms->set_should_reexecute(true);
1891 tmp_jvms->bind_map(map());
1892 // Execution needs to restart at the next bytecode (entry of next
1893 // block)
1894 if (target->is_merged() ||
1895 pnum > PhiNode::Input ||
1896 target->is_handler() ||
1897 target->is_loop_head()) {
1898 set_parse_bci(target->start());
1899 for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1900 Node* n = map()->in(j); // Incoming change to target state.
1901 const Type* t = nullptr;
1902 ciType* ct = nullptr;
1903 if (tmp_jvms->is_loc(j)) {
1904 int loc_idx = j - tmp_jvms->locoff();
1905 t = target->local_type_at(loc_idx);
1906 ct = target->flow()->local_type_at(loc_idx);
1907 } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1908 int stk_idx = j - tmp_jvms->stkoff();
1909 t = target->stack_type_at(stk_idx);
1910 ct = target->flow()->stack_type_at(stk_idx);
1911 }
1912 if (t != nullptr && t != Type::BOTTOM) {
1913 // An object can appear in the JVMS as either an oop or an InlineTypeNode. If the merge is
1914 // an InlineTypeNode, we need all the merge inputs to be InlineTypeNodes. Else, if the
1915 // merge is an oop, each merge input needs to be either an oop or a buffered
1916 // InlineTypeNode.
1917 if (!t->is_inlinetypeptr()) {
1918 // The merge cannot be an InlineTypeNode, ensure the input is buffered if it is an
1919 // InlineTypeNode
1920 if (n->is_InlineType()) {
1921 map()->set_req(j, n->as_InlineType()->buffer(this));
1922 }
1923 } else {
1924 // Scalarize the value object if it is not larval
1925 if (!n->is_InlineType() && !ct->is_early_larval()) {
1926 assert(_gvn.type(n) == TypePtr::NULL_PTR, "must be a null constant");
1927 map()->set_req(j, InlineTypeNode::make_null(_gvn, t->inline_klass()));
1928 }
1929 }
1930 }
1931 }
1932 }
1933 old_jvms->bind_map(map());
1934 set_parse_bci(old_bci);
1935
1936 if (!target->is_merged()) { // No prior mapping at this bci
1937 if (TraceOptoParse) { tty->print(" with empty state"); }
1938
1939 // If this path is dead, do not bother capturing it as a merge.
1940 // It is "as if" we had 1 fewer predecessors from the beginning.
1941 if (stopped()) {
1942 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1943 return;
1944 }
1945
1946 // Make a region if we know there are multiple or unpredictable inputs.
1947 // (Also, if this is a plain fall-through, we might see another region,
1948 // which must not be allowed into this block's map.)
1949 if (pnum > PhiNode::Input // Known multiple inputs.
1950 || target->is_handler() // These have unpredictable inputs.
1951 || target->is_loop_head() // Known multiple inputs
1952 || control()->is_Region()) { // We must hide this guy.
1953
1954 int current_bci = bci();
1955 set_parse_bci(target->start()); // Set target bci
1970 record_for_igvn(r);
1971 // zap all inputs to null for debugging (done in Node(uint) constructor)
1972 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1973 r->init_req(pnum, control());
1974 set_control(r);
1975 target->copy_irreducible_status_to(r, jvms());
1976 set_parse_bci(current_bci); // Restore bci
1977 }
1978
1979 // Convert the existing Parser mapping into a mapping at this bci.
1980 store_state_to(target);
1981 assert(target->is_merged(), "do not come here twice");
1982
1983 } else { // Prior mapping at this bci
1984 if (TraceOptoParse) { tty->print(" with previous state"); }
1985 #ifdef ASSERT
1986 if (target->is_SEL_head()) {
1987 target->mark_merged_backedge(block());
1988 }
1989 #endif
1990
1991 // We must not manufacture more phis if the target is already parsed.
1992 bool nophi = target->is_parsed();
1993
1994 SafePointNode* newin = map();// Hang on to incoming mapping
1995 Block* save_block = block(); // Hang on to incoming block;
1996 load_state_from(target); // Get prior mapping
1997
1998 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1999 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
2000 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
2001 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
2002
2003 // Iterate over my current mapping and the old mapping.
2004 // Where different, insert Phi functions.
2005 // Use any existing Phi functions.
2006 assert(control()->is_Region(), "must be merging to a region");
2007 RegionNode* r = control()->as_Region();
2008
2009 // Compute where to merge into
2010 // Merge incoming control path
2011 r->init_req(pnum, newin->control());
2012
2013 if (pnum == 1) { // Last merge for this Region?
2014 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
2015 Node* result = _gvn.transform(r);
2016 if (r != result && TraceOptoParse) {
2017 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
2018 }
2019 }
2020 record_for_igvn(r);
2021 }
2022
2023 // Update all the non-control inputs to map:
2024 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
2025 bool check_elide_phi = target->is_SEL_backedge(save_block);
2026 bool last_merge = (pnum == PhiNode::Input);
2027 for (uint j = 1; j < newin->req(); j++) {
2028 Node* m = map()->in(j); // Current state of target.
2029 Node* n = newin->in(j); // Incoming change to target state.
2030 Node* phi;
2031 if (m->is_Phi() && m->as_Phi()->region() == r) {
2032 phi = m;
2033 } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
2034 phi = m;
2035 } else {
2036 phi = nullptr;
2037 }
2038 if (m != n) { // Different; must merge
2039 switch (j) {
2040 // Frame pointer and Return Address never change
2041 case TypeFunc::FramePtr:// Drop m, use the original value
2042 case TypeFunc::ReturnAdr:
2043 break;
2044 case TypeFunc::Memory: // Merge inputs to the MergeMem node
2045 assert(phi == nullptr, "the merge contains phis, not vice versa");
2046 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
2047 continue;
2048 default: // All normal stuff
2049 if (phi == nullptr) {
2050 const JVMState* jvms = map()->jvms();
2051 if (EliminateNestedLocks &&
2052 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
2053 // BoxLock nodes are not commoned when EliminateNestedLocks is on.
2054 // Use old BoxLock node as merged box.
2055 assert(newin->jvms()->is_monitor_box(j), "sanity");
2056 // This assert also tests that nodes are BoxLock.
2057 assert(BoxLockNode::same_slot(n, m), "sanity");
2064 // Incremental Inlining before EA and Macro nodes elimination.
2065 //
2066 // Incremental Inlining is executed after IGVN optimizations
2067 // during which BoxLock can be marked as Coarsened.
2068 old_box->set_coarsened(); // Verifies state
2069 old_box->set_unbalanced();
2070 }
2071 C->gvn_replace_by(n, m);
2072 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
2073 phi = ensure_phi(j, nophi);
2074 }
2075 }
2076 break;
2077 }
2078 }
2079 // At this point, n might be top if:
2080 // - there is no phi (because TypeFlow detected a conflict), or
2081 // - the corresponding control edge is top (a dead incoming path)
2082 // It is a bug if we create a phi which sees a garbage value on a live path.
2083
2084 // Merging two inline types?
2085 if (phi != nullptr && phi->is_InlineType()) {
2086 // Reload current state because it may have been updated by ensure_phi
2087 assert(phi == map()->in(j), "unexpected value in map");
2088 assert(phi->as_InlineType()->has_phi_inputs(r), "");
2089 InlineTypeNode* vtm = phi->as_InlineType(); // Current inline type
2090 InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
2091 assert(vtm == phi, "Inline type should have Phi input");
2092
2093 #ifdef ASSERT
2094 if (TraceOptoParse) {
2095 tty->print_cr("\nMerging inline types");
2096 tty->print_cr("Current:");
2097 vtm->dump(2);
2098 tty->print_cr("Incoming:");
2099 vtn->dump(2);
2100 tty->cr();
2101 }
2102 #endif
2103 // Do the merge
2104 vtm->merge_with(&_gvn, vtn, pnum, last_merge);
2105 if (last_merge) {
2106 map()->set_req(j, _gvn.transform(vtm));
2107 record_for_igvn(vtm);
2108 }
2109 } else if (phi != nullptr) {
2110 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
2111 assert(phi->as_Phi()->region() == r, "");
2112 phi->set_req(pnum, n); // Then add 'n' to the merge
2113 if (last_merge) {
2114 // Last merge for this Phi.
2115 // So far, Phis have had a reasonable type from ciTypeFlow.
2116 // Now _gvn will join that with the meet of current inputs.
2117 // BOTTOM is never permissible here because, pessimistically,
2118 // Phis of pointers cannot lose the basic pointer type.
2119 DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
2120 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
2121 map()->set_req(j, _gvn.transform(phi));
2122 DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
2123 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
2124 record_for_igvn(phi);
2125 }
2126 }
2127 } // End of for all values to be merged
2128
2129 if (last_merge && !r->in(0)) { // The occasional useless Region
2130 assert(control() == r, "");
2131 set_control(r->nonnull_req());
2132 }
2133
2134 map()->merge_replaced_nodes_with(newin);
2135
2136 // newin has been subsumed into the lazy merge, and is now dead.
2137 set_block(save_block);
2138
2139 stop(); // done with this guy, for now
2140 }
2141
2142 if (TraceOptoParse) {
2143 tty->print_cr(" on path %d", pnum);
2144 }
2145
2146 // Done with this parser state.
2147 assert(stopped(), "");
2148 }
2149
2261
2262 // Add new path to the region.
2263 uint pnum = r->req();
2264 r->add_req(nullptr);
2265
2266 for (uint i = 1; i < map->req(); i++) {
2267 Node* n = map->in(i);
2268 if (i == TypeFunc::Memory) {
2269 // Ensure a phi on all currently known memories.
2270 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2271 Node* phi = mms.memory();
2272 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2273 assert(phi->req() == pnum, "must be same size as region");
2274 phi->add_req(nullptr);
2275 }
2276 }
2277 } else {
2278 if (n->is_Phi() && n->as_Phi()->region() == r) {
2279 assert(n->req() == pnum, "must be same size as region");
2280 n->add_req(nullptr);
2281 } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2282 n->as_InlineType()->add_new_path(r);
2283 }
2284 }
2285 }
2286
2287 return pnum;
2288 }
2289
2290 //------------------------------ensure_phi-------------------------------------
2291 // Turn the idx'th entry of the current map into a Phi
2292 Node* Parse::ensure_phi(int idx, bool nocreate) {
2293 SafePointNode* map = this->map();
2294 Node* region = map->control();
2295 assert(region->is_Region(), "");
2296
2297 Node* o = map->in(idx);
2298 assert(o != nullptr, "");
2299
2300 if (o == top()) return nullptr; // TOP always merges into TOP
2301
2302 if (o->is_Phi() && o->as_Phi()->region() == region) {
2303 return o->as_Phi();
2304 }
2305 InlineTypeNode* vt = o->isa_InlineType();
2306 if (vt != nullptr && vt->has_phi_inputs(region)) {
2307 return vt;
2308 }
2309
2310 // Now use a Phi here for merging
2311 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2312 const JVMState* jvms = map->jvms();
2313 const Type* t = nullptr;
2314 if (jvms->is_loc(idx)) {
2315 t = block()->local_type_at(idx - jvms->locoff());
2316 } else if (jvms->is_stk(idx)) {
2317 t = block()->stack_type_at(idx - jvms->stkoff());
2318 } else if (jvms->is_mon(idx)) {
2319 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2320 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2321 } else if ((uint)idx < TypeFunc::Parms) {
2322 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2323 } else {
2324 assert(false, "no type information for this phi");
2325 }
2326
2327 // If the type falls to bottom, then this must be a local that
2328 // is already dead or is mixing ints and oops or some such.
2329 // Forcing it to top makes it go dead.
2330 if (t == Type::BOTTOM) {
2331 map->set_req(idx, top());
2332 return nullptr;
2333 }
2334
2335 // Do not create phis for top either.
2336 // A top on a non-null control path must remain unused even after the phi.
2337 if (t == Type::TOP || t == Type::HALF) {
2338 map->set_req(idx, top());
2339 return nullptr;
2340 }
2341
2342 if (vt != nullptr && t->is_inlinetypeptr()) {
2343 // Inline types are merged by merging their field values.
2344 // Create a cloned InlineTypeNode with phi inputs that
2345 // represents the merged inline type and update the map.
2346 vt = vt->clone_with_phis(&_gvn, region);
2347 map->set_req(idx, vt);
2348 return vt;
2349 } else {
2350 PhiNode* phi = PhiNode::make(region, o, t);
2351 gvn().set_type(phi, t);
2352 if (C->do_escape_analysis()) record_for_igvn(phi);
2353 map->set_req(idx, phi);
2354 return phi;
2355 }
2356 }
2357
2358 //--------------------------ensure_memory_phi----------------------------------
2359 // Turn the idx'th slice of the current memory into a Phi
2360 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2361 MergeMemNode* mem = merged_memory();
2362 Node* region = control();
2363 assert(region->is_Region(), "");
2364
2365 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2366 assert(o != nullptr && o != top(), "");
2367
2368 PhiNode* phi;
2369 if (o->is_Phi() && o->as_Phi()->region() == region) {
2370 phi = o->as_Phi();
2371 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2372 // clone the shared base memory phi to make a new memory split
2373 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2374 const Type* t = phi->bottom_type();
2375 const TypePtr* adr_type = C->get_adr_type(idx);
2465 // Add check to deoptimize once holder klass is fully initialized.
2466 void Parse::clinit_deopt() {
2467 assert(C->has_method(), "only for normal compilations");
2468 assert(depth() == 1, "only for main compiled method");
2469 assert(is_normal_parse(), "no barrier needed on osr entry");
2470 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2471
2472 set_parse_bci(0);
2473
2474 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2475 guard_klass_being_initialized(holder);
2476 }
2477
2478 //------------------------------return_current---------------------------------
2479 // Append current _map to _exit_return
2480 void Parse::return_current(Node* value) {
2481 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2482 call_register_finalizer();
2483 }
2484
2485 // frame pointer is always same, already captured
2486 if (value != nullptr) {
2487 Node* phi = _exits.argument(0);
2488 const Type* return_type = phi->bottom_type();
2489 const TypeInstPtr* tr = return_type->isa_instptr();
2490 if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2491 return_type->is_inlinetypeptr()) {
2492 // Inline type is returned as fields, make sure it is scalarized
2493 if (!value->is_InlineType()) {
2494 value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass());
2495 }
2496 if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2497 // Returning from root or an incrementally inlined method. Make sure all non-flat
2498 // fields are buffered and re-execute if allocation triggers deoptimization.
2499 PreserveReexecuteState preexecs(this);
2500 assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2501 jvms()->set_should_reexecute(true);
2502 inc_sp(1);
2503 value = value->as_InlineType()->allocate_fields(this);
2504 }
2505 } else if (value->is_InlineType()) {
2506 // Inline type is returned as oop, make sure it is buffered and re-execute
2507 // if allocation triggers deoptimization.
2508 PreserveReexecuteState preexecs(this);
2509 jvms()->set_should_reexecute(true);
2510 inc_sp(1);
2511 value = value->as_InlineType()->buffer(this);
2512 }
2513 // ...else
2514 // If returning oops to an interface-return, there is a silent free
2515 // cast from oop to interface allowed by the Verifier. Make it explicit here.
2516 phi->add_req(value);
2517 }
2518
2519 if (StressReachabilityFences) {
2520 // Insert reachability fences for all recorded oop values at the end of the method.
2521 for (uint i = 1; i < _stress_rf_hook->req(); i++) {
2522 Node* referent = _stress_rf_hook->in(i);
2523 assert(referent->bottom_type()->isa_oopptr(), "%s", Type::str(referent->bottom_type()));
2524 insert_reachability_fence(referent);
2525 }
2526 }
2527
2528 // Do not set_parse_bci, so that return goo is credited to the return insn.
2529 set_bci(InvocationEntryBci);
2530 if (method()->is_synchronized()) {
2531 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2532 }
2533 if (C->env()->dtrace_method_probes()) {
2534 make_dtrace_method_exit(method());
2535 }
2536
2537 SafePointNode* exit_return = _exits.map();
2538 exit_return->in( TypeFunc::Control )->add_req( control() );
2539 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2540 Node *mem = exit_return->in( TypeFunc::Memory );
2541 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2542 if (mms.is_empty()) {
2543 // get a copy of the base memory, and patch just this one input
2544 const TypePtr* adr_type = mms.adr_type(C);
2545 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2546 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2547 gvn().set_type_bottom(phi);
2548 phi->del_req(phi->req()-1); // prepare to re-patch
2549 mms.set_memory(phi);
2550 }
2551 mms.memory()->add_req(mms.memory2());
2552 }
2553
2554 if (_first_return) {
2555 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2556 _first_return = false;
2557 } else {
2558 _exits.map()->merge_replaced_nodes_with(map());
2559 }
2560
2561 stop_and_kill_map(); // This CFG path dies here
2562 }
2563
2564
2565 //------------------------------add_safepoint----------------------------------
2566 void Parse::add_safepoint() {
2567 uint parms = TypeFunc::Parms+1;
2568
2569 // Clear out dead values from the debug info.
2570 kill_dead_locals();
2571
2572 // Clone the JVM State
2573 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
|