9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compileLog.hpp"
26 #include "interpreter/linkResolver.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/method.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/c2compiler.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/idealGraphPrinter.hpp"
33 #include "opto/locknode.hpp"
34 #include "opto/memnode.hpp"
35 #include "opto/opaquenode.hpp"
36 #include "opto/parse.hpp"
37 #include "opto/rootnode.hpp"
38 #include "opto/runtime.hpp"
39 #include "opto/type.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/safepointMechanism.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "utilities/bitMap.inline.hpp"
44 #include "utilities/copy.hpp"
45
46 // Static array so we can figure out which bytecodes stop us from compiling
47 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
48 // and eventually should be encapsulated in a proper class (gri 8/18/98).
49
50 #ifndef PRODUCT
51 uint nodes_created = 0;
52 uint methods_parsed = 0;
53 uint methods_seen = 0;
54 uint blocks_parsed = 0;
55 uint blocks_seen = 0;
56
57 uint explicit_null_checks_inserted = 0;
58 uint explicit_null_checks_elided = 0;
59 uint all_null_checks_found = 0;
60 uint implicit_null_checks = 0;
1115 _exits.map()->apply_replaced_nodes(_new_idx);
1116 }
1117
1118 //-----------------------------create_entry_map-------------------------------
1119 // Initialize our parser map to contain the types at method entry.
1120 // For OSR, the map contains a single RawPtr parameter.
1121 // Initial monitor locking for sync. methods is performed by do_method_entry.
1122 SafePointNode* Parse::create_entry_map() {
1123 // Check for really stupid bail-out cases.
1124 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1125 if (len >= 32760) {
1126 // Bailout expected, this is a very rare edge case.
1127 C->record_method_not_compilable("too many local variables");
1128 return nullptr;
1129 }
1130
1131 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1132 _caller->map()->delete_replaced_nodes();
1133
1134 // If this is an inlined method, we may have to do a receiver null check.
1135 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1136 GraphKit kit(_caller);
1137 kit.null_check_receiver_before_call(method());
1138 _caller = kit.transfer_exceptions_into_jvms();
1139 if (kit.stopped()) {
1140 _exits.add_exception_states_from(_caller);
1141 _exits.set_jvms(_caller);
1142 return nullptr;
1143 }
1144 }
1145
1146 assert(method() != nullptr, "parser must have a method");
1147
1148 // Create an initial safepoint to hold JVM state during parsing
1149 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1150 set_map(new SafePointNode(len, jvms));
1151
1152 // Capture receiver info for compiled lambda forms.
1153 if (method()->is_compiled_lambda_form()) {
1154 ciInstance* recv_info = _caller->compute_receiver_info(method());
1155 jvms->set_receiver_info(recv_info);
1156 }
1157
1177 set_all_memory(reset_memory());
1178 }
1179 assert(merged_memory(), "");
1180
1181 // Now add the locals which are initially bound to arguments:
1182 uint arg_size = tf()->domain()->cnt();
1183 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1184 for (i = TypeFunc::Parms; i < arg_size; i++) {
1185 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1186 }
1187
1188 // Clear out the rest of the map (locals and stack)
1189 for (i = arg_size; i < len; i++) {
1190 map()->init_req(i, top());
1191 }
1192
1193 SafePointNode* entry_map = stop();
1194 return entry_map;
1195 }
1196
1197 //-----------------------------do_method_entry--------------------------------
1198 // Emit any code needed in the pseudo-block before BCI zero.
1199 // The main thing to do is lock the receiver of a synchronized method.
1200 void Parse::do_method_entry() {
1201 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1202 set_sp(0); // Java Stack Pointer
1203
1204 NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1205
1206 if (C->env()->dtrace_method_probes()) {
1207 make_dtrace_method_entry(method());
1208 }
1209
1210 #ifdef ASSERT
1211 // Narrow receiver type when it is too broad for the method being parsed.
1212 if (!method()->is_static()) {
1213 ciInstanceKlass* callee_holder = method()->holder();
1214 const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);
1215
1216 Node* receiver_obj = local(0);
1217 const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1218
1219 if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
1220 // Receiver should always be a subtype of callee holder.
1221 // But, since C2 type system doesn't properly track interfaces,
1222 // the invariant can't be expressed in the type system for default methods.
1223 // Example: for unrelated C <: I and D <: I, (C `meet` D) = Object </: I.
1224 assert(callee_holder->is_interface(), "missing subtype check");
1225
1226 // Perform dynamic receiver subtype check against callee holder class w/ a halt on failure.
1227 Node* holder_klass = _gvn.makecon(TypeKlassPtr::make(callee_holder, Type::trust_interfaces));
1228 Node* not_subtype_ctrl = gen_subtype_check(receiver_obj, holder_klass);
1229 assert(!stopped(), "not a subtype");
1247 // Setup Object Pointer
1248 Node *lock_obj = nullptr;
1249 if (method()->is_static()) {
1250 ciInstance* mirror = _method->holder()->java_mirror();
1251 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1252 lock_obj = makecon(t_lock);
1253 } else { // Else pass the "this" pointer,
1254 lock_obj = local(0); // which is Parm0 from StartNode
1255 }
1256 // Clear out dead values from the debug info.
1257 kill_dead_locals();
1258 // Build the FastLockNode
1259 _synch_lock = shared_lock(lock_obj);
1260 // Check for bailout in shared_lock
1261 if (failing()) { return; }
1262 }
1263
1264 // Feed profiling data for parameters to the type system so it can
1265 // propagate it as speculative types
1266 record_profiled_parameters_for_speculation();
1267 }
1268
1269 //------------------------------init_blocks------------------------------------
1270 // Initialize our parser map to contain the types/monitors at method entry.
1271 void Parse::init_blocks() {
1272 // Create the blocks.
1273 _block_count = flow()->block_count();
1274 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1275
1276 // Initialize the structs.
1277 for (int rpo = 0; rpo < block_count(); rpo++) {
1278 Block* block = rpo_at(rpo);
1279 new(block) Block(this, rpo);
1280 }
1281
1282 // Collect predecessor and successor information.
1283 for (int rpo = 0; rpo < block_count(); rpo++) {
1284 Block* block = rpo_at(rpo);
1285 block->init_graph(this);
1286 }
1575 // to produce successors for trapping blocks.
1576 int trap_index = block()->flow()->trap_index();
1577 assert(trap_index != 0, "trap index must be valid");
1578 uncommon_trap(trap_index);
1579 break;
1580 }
1581
1582 NOT_PRODUCT( parse_histogram()->set_initial_state(bc()); );
1583
1584 #ifdef ASSERT
1585 int pre_bc_sp = sp();
1586 int inputs, depth;
1587 bool have_se = !stopped() && compute_stack_effects(inputs, depth);
1588 assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC: pre_bc_sp=%d, inputs=%d", pre_bc_sp, inputs);
1589 #endif //ASSERT
1590
1591 do_one_bytecode();
1592 if (failing()) return;
1593
1594 assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth,
1595 "incorrect depth prediction: sp=%d, pre_bc_sp=%d, depth=%d", sp(), pre_bc_sp, depth);
1596
1597 do_exceptions();
1598
1599 NOT_PRODUCT( parse_histogram()->record_change(); );
1600
1601 if (log != nullptr)
1602 log->clear_context(); // skip marker if nothing was printed
1603
1604 // Fall into next bytecode. Each bytecode normally has 1 sequential
1605 // successor which is typically made ready by visiting this bytecode.
1606 // If the successor has several predecessors, then it is a merge
1607 // point, starts a new basic block, and is handled like other basic blocks.
1608 }
1609 }
1610
1611
1612 //------------------------------merge------------------------------------------
1613 void Parse::set_parse_bci(int bci) {
1614 set_bci(bci);
1615 Node_Notes* nn = C->default_node_notes();
2157
2158 Node* fast_io = call->in(TypeFunc::I_O);
2159 Node* fast_mem = call->in(TypeFunc::Memory);
  // These two phis are pre-filled with copies of the fast IO and Memory
2161 Node* io_phi = PhiNode::make(result_rgn, fast_io, Type::ABIO);
2162 Node* mem_phi = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
2163
2164 result_rgn->init_req(2, control());
2165 io_phi ->init_req(2, i_o());
2166 mem_phi ->init_req(2, reset_memory());
2167
2168 set_all_memory( _gvn.transform(mem_phi) );
2169 set_i_o( _gvn.transform(io_phi) );
2170 }
2171
2172 set_control( _gvn.transform(result_rgn) );
2173 }
2174
2175 // Add check to deoptimize once holder klass is fully initialized.
2176 void Parse::clinit_deopt() {
2177 assert(C->has_method(), "only for normal compilations");
2178 assert(depth() == 1, "only for main compiled method");
2179 assert(is_normal_parse(), "no barrier needed on osr entry");
2180 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2181
2182 set_parse_bci(0);
2183
2184 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2185 guard_klass_being_initialized(holder);
2186 }
2187
2188 //------------------------------return_current---------------------------------
2189 // Append current _map to _exit_return
2190 void Parse::return_current(Node* value) {
2191 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2192 call_register_finalizer();
2193 }
2194
2195 // Do not set_parse_bci, so that return goo is credited to the return insn.
2196 set_bci(InvocationEntryBci);
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compileLog.hpp"
26 #include "interpreter/linkResolver.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/method.hpp"
29 #include "oops/trainingData.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/c2compiler.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/idealGraphPrinter.hpp"
34 #include "opto/locknode.hpp"
35 #include "opto/memnode.hpp"
36 #include "opto/opaquenode.hpp"
37 #include "opto/parse.hpp"
38 #include "opto/rootnode.hpp"
39 #include "opto/runtime.hpp"
40 #include "opto/type.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/runtimeUpcalls.hpp"
43 #include "runtime/safepointMechanism.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "utilities/bitMap.inline.hpp"
46 #include "utilities/copy.hpp"
47
48 // Static array so we can figure out which bytecodes stop us from compiling
49 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
50 // and eventually should be encapsulated in a proper class (gri 8/18/98).
51
52 #ifndef PRODUCT
53 uint nodes_created = 0;
54 uint methods_parsed = 0;
55 uint methods_seen = 0;
56 uint blocks_parsed = 0;
57 uint blocks_seen = 0;
58
59 uint explicit_null_checks_inserted = 0;
60 uint explicit_null_checks_elided = 0;
61 uint all_null_checks_found = 0;
62 uint implicit_null_checks = 0;
1117 _exits.map()->apply_replaced_nodes(_new_idx);
1118 }
1119
1120 //-----------------------------create_entry_map-------------------------------
1121 // Initialize our parser map to contain the types at method entry.
1122 // For OSR, the map contains a single RawPtr parameter.
1123 // Initial monitor locking for sync. methods is performed by do_method_entry.
1124 SafePointNode* Parse::create_entry_map() {
1125 // Check for really stupid bail-out cases.
1126 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1127 if (len >= 32760) {
1128 // Bailout expected, this is a very rare edge case.
1129 C->record_method_not_compilable("too many local variables");
1130 return nullptr;
1131 }
1132
1133 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1134 _caller->map()->delete_replaced_nodes();
1135
1136 // If this is an inlined method, we may have to do a receiver null check.
1137 if (_caller->has_method() && is_normal_parse()) {
1138 GraphKit kit(_caller);
1139 if (!method()->is_static()) {
1140 kit.null_check_receiver_before_call(method());
1141 } else if (C->do_clinit_barriers() && C->needs_clinit_barrier(method()->holder(), _caller->method())) {
1142 ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
1143 const int nargs = declared_method->arg_size();
1144 kit.inc_sp(nargs);
1145 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
1146 kit.guard_klass_is_initialized(holder);
1147 kit.dec_sp(nargs);
1148 }
1149 _caller = kit.transfer_exceptions_into_jvms();
1150 if (kit.stopped()) {
1151 _exits.add_exception_states_from(_caller);
1152 _exits.set_jvms(_caller);
1153 return nullptr;
1154 }
1155 }
1156
1157 assert(method() != nullptr, "parser must have a method");
1158
1159 // Create an initial safepoint to hold JVM state during parsing
1160 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1161 set_map(new SafePointNode(len, jvms));
1162
1163 // Capture receiver info for compiled lambda forms.
1164 if (method()->is_compiled_lambda_form()) {
1165 ciInstance* recv_info = _caller->compute_receiver_info(method());
1166 jvms->set_receiver_info(recv_info);
1167 }
1168
1188 set_all_memory(reset_memory());
1189 }
1190 assert(merged_memory(), "");
1191
1192 // Now add the locals which are initially bound to arguments:
1193 uint arg_size = tf()->domain()->cnt();
1194 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1195 for (i = TypeFunc::Parms; i < arg_size; i++) {
1196 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1197 }
1198
1199 // Clear out the rest of the map (locals and stack)
1200 for (i = arg_size; i < len; i++) {
1201 map()->init_req(i, top());
1202 }
1203
1204 SafePointNode* entry_map = stop();
1205 return entry_map;
1206 }
1207
1208 #if INCLUDE_CDS
1209 static int scale_limit(int64_t limit) {
1210 // To scale invocation limit a hyperbolic saturation curve formula
1211 // is used with upper limit 100K.
1212 return (int)(AOTCodeInvokeBase + limit / (1.0 + limit / (100000.0 * AOTCodeInvokeScale)));
1213 }
1214
//-----------------------------count_aot_code_calls----------------------------
// Emit IR at method entry that maintains invocation counters for the AOT
// code pipeline (top-level compilations only, depth() == 1):
//  - Training run: bump a 64-bit counter of C2-compiled code invocations
//    in this method's MethodCounters.
//  - AOT assembly phase: bump a 32-bit counter of AOT code invocations
//    and, once it crosses the scaled limit recorded during training,
//    call into the runtime to request JIT recompilation which replaces
//    the AOT code. A flag in MethodCounters keeps the request from being
//    issued on every subsequent invocation.
void Parse::count_aot_code_calls() {
  bool is_aot_compilation = C->env()->is_precompile();
  if (UseAOTCodeCounters && (depth() == 1) && (AOTRecordTraining || is_aot_compilation)) {
    // Count nmethod invocations during training run and compare to
    // invocations of AOT code during production run to trigger JIT compilation
    // and replace AOT code with normal JITed code.
    ciMetadata* mcp = method()->ensure_method_counters();
    precond(mcp != nullptr);
    // Constant pointer to this method's MethodCounters metadata.
    const TypePtr* mc_type = TypeMetadataPtr::make(TypePtr::Constant, mcp, 0);
    Node* mc = makecon(mc_type);
    if (!is_aot_compilation) { // training
      // Count C2 compiled code invocations (use 64 bits)
      Node* cnt_adr = basic_plus_adr(C->top(), mc, in_bytes(MethodCounters::jit_code_invocation_counter_offset()));
      Node* ctrl = control();
      Node* cnt = make_load(ctrl, cnt_adr, TypeLong::LONG, T_LONG, MemNode::unordered);
      Node* incr = _gvn.transform(new AddLNode(cnt, longcon(1)));
      store_to_memory(ctrl, cnt_adr, incr, T_LONG, MemNode::unordered);

    } else { // assembly phase
      // Clear out dead values from the debug info in following runtime call
      kill_dead_locals();

      precond(MethodTrainingData::have_data());
      methodHandle mh(Thread::current(), method()->get_Method());
      MethodTrainingData* mtd = MethodTrainingData::find_fast(mh);
      precond(mtd != nullptr);
      // Invocation limit recorded during training, rescaled (see scale_limit).
      int64_t limit = mtd->aot_code_invocation_limit();
      int scaled_limit = scale_limit(limit);
      Node* lim = intcon(scaled_limit);

      // Count AOT compiled code invocations (use 32 bits because scaled limit fits into 32 bits)
      Node* cnt_adr = basic_plus_adr(C->top(), mc, in_bytes(MethodCounters::aot_code_invocation_counter_offset()));
      Node* ctrl = control();
      Node* cnt = make_load(ctrl, cnt_adr, TypeInt::INT, T_INT, MemNode::unordered);
      Node* incr = _gvn.transform(new AddINode(cnt, intcon(1)));
      store_to_memory(ctrl, cnt_adr, incr, T_INT, MemNode::unordered);
      // Preserve memory for Phi node below
      Node* st_mem = MergeMemNode::make(map()->memory());
      _gvn.set_type_bottom(st_mem);

      // Fast path: counter still below the limit, skip the runtime call.
      Node* chk = _gvn.transform( new CmpINode(incr, lim) );
      Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::lt) );
      IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, (float)limit);

      // Merge point for three paths: (1) counter below limit,
      // (2) recompilation already requested, (3) after the runtime call.
      RegionNode* result_rgn = new RegionNode(4);
      record_for_igvn(result_rgn);

      Node* skip_call = _gvn.transform(new IfTrueNode(iff));
      result_rgn->init_req(1, skip_call);

      Node* in1_io = i_o();
      Node* in1_mem = st_mem;
      // These two phis are pre-filled with copies of the fast IO and Memory
      Node* io_phi = PhiNode::make(result_rgn, in1_io, Type::ABIO);
      Node* mem_phi = PhiNode::make(result_rgn, in1_mem, Type::MEMORY, TypePtr::BOTTOM);

      Node* needs_call = _gvn.transform(new IfFalseNode(iff));
      set_control(needs_call);

      // Check if we already requested compilation.
      ByteSize flag_offset = MethodCounters::aot_code_recompile_requested_offset();
      Node* flag_adr = basic_plus_adr(C->top(), mc, in_bytes(flag_offset));

      // Load old value to check and store new (+1) unconditionally.
      // It is fine if few threads see initial 0 value and request compilation:
      // CompileBroker checks if such compilation is already in compilation queue.
      Node* old_val = make_load(control(), flag_adr, TypeInt::INT, T_INT, MemNode::unordered);
      Node* new_val = _gvn.transform(new AddINode(old_val, intcon(1)));
      store_to_memory(control(), flag_adr, new_val, T_INT, MemNode::unordered);

      // Non-zero old value means some thread already requested recompilation.
      Node* chk2 = _gvn.transform( new CmpINode(old_val, intcon(0)) );
      Node* tst2 = _gvn.transform( new BoolNode(chk2, BoolTest::ne) );
      IfNode* iff2 = create_and_map_if(control(), tst2, PROB_FAIR, COUNT_UNKNOWN);

      Node* skip_call2 = _gvn.transform(new IfTrueNode(iff2));
      result_rgn->init_req(2, skip_call2);

      Node* needs_call2 = _gvn.transform(new IfFalseNode(iff2));
      set_control(needs_call2);

      // Request JIT recompilation of this method via a runtime upcall.
      const TypePtr* m_type = TypeMetadataPtr::make(method());
      Node* m = makecon(m_type);
      Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
                                     OptoRuntime::compile_method_Type(),
                                     OptoRuntime::compile_method_Java(),
                                     "compile_method", TypePtr::BOTTOM, m);

      // State before call
      Node* in_io = call->in(TypeFunc::I_O);
      Node* in_mem = call->in(TypeFunc::Memory);
      io_phi ->init_req(2, in_io);
      mem_phi->init_req(2, in_mem);

      // State after call
      result_rgn->init_req(3, control());
      io_phi ->init_req(3, i_o());
      mem_phi->init_req(3, reset_memory());

      set_all_memory( _gvn.transform(mem_phi) );
      set_i_o( _gvn.transform(io_phi) );
      set_control( _gvn.transform(result_rgn) );
    }
  }
}
1319 #endif
1320
1321 //-----------------------------do_method_entry--------------------------------
1322 // Emit any code needed in the pseudo-block before BCI zero.
1323 // The main thing to do is lock the receiver of a synchronized method.
1324 void Parse::do_method_entry() {
1325 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1326 set_sp(0); // Java Stack Pointer
1327
1328 NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1329
1330 if (C->env()->dtrace_method_probes()) {
1331 make_dtrace_method_entry(method());
1332 }
1333
1334 install_on_method_entry_runtime_upcalls(method());
1335
1336 #ifdef ASSERT
1337 // Narrow receiver type when it is too broad for the method being parsed.
1338 if (!method()->is_static()) {
1339 ciInstanceKlass* callee_holder = method()->holder();
1340 const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);
1341
1342 Node* receiver_obj = local(0);
1343 const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1344
1345 if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
1346 // Receiver should always be a subtype of callee holder.
1347 // But, since C2 type system doesn't properly track interfaces,
1348 // the invariant can't be expressed in the type system for default methods.
1349 // Example: for unrelated C <: I and D <: I, (C `meet` D) = Object </: I.
1350 assert(callee_holder->is_interface(), "missing subtype check");
1351
1352 // Perform dynamic receiver subtype check against callee holder class w/ a halt on failure.
1353 Node* holder_klass = _gvn.makecon(TypeKlassPtr::make(callee_holder, Type::trust_interfaces));
1354 Node* not_subtype_ctrl = gen_subtype_check(receiver_obj, holder_klass);
1355 assert(!stopped(), "not a subtype");
1373 // Setup Object Pointer
1374 Node *lock_obj = nullptr;
1375 if (method()->is_static()) {
1376 ciInstance* mirror = _method->holder()->java_mirror();
1377 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1378 lock_obj = makecon(t_lock);
1379 } else { // Else pass the "this" pointer,
1380 lock_obj = local(0); // which is Parm0 from StartNode
1381 }
1382 // Clear out dead values from the debug info.
1383 kill_dead_locals();
1384 // Build the FastLockNode
1385 _synch_lock = shared_lock(lock_obj);
1386 // Check for bailout in shared_lock
1387 if (failing()) { return; }
1388 }
1389
1390 // Feed profiling data for parameters to the type system so it can
1391 // propagate it as speculative types
1392 record_profiled_parameters_for_speculation();
1393
1394 CDS_ONLY( count_aot_code_calls(); )
1395 }
1396
1397 //------------------------------init_blocks------------------------------------
1398 // Initialize our parser map to contain the types/monitors at method entry.
1399 void Parse::init_blocks() {
1400 // Create the blocks.
1401 _block_count = flow()->block_count();
1402 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1403
1404 // Initialize the structs.
1405 for (int rpo = 0; rpo < block_count(); rpo++) {
1406 Block* block = rpo_at(rpo);
1407 new(block) Block(this, rpo);
1408 }
1409
1410 // Collect predecessor and successor information.
1411 for (int rpo = 0; rpo < block_count(); rpo++) {
1412 Block* block = rpo_at(rpo);
1413 block->init_graph(this);
1414 }
1703 // to produce successors for trapping blocks.
1704 int trap_index = block()->flow()->trap_index();
1705 assert(trap_index != 0, "trap index must be valid");
1706 uncommon_trap(trap_index);
1707 break;
1708 }
1709
1710 NOT_PRODUCT( parse_histogram()->set_initial_state(bc()); );
1711
1712 #ifdef ASSERT
1713 int pre_bc_sp = sp();
1714 int inputs, depth;
1715 bool have_se = !stopped() && compute_stack_effects(inputs, depth);
1716 assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC: pre_bc_sp=%d, inputs=%d", pre_bc_sp, inputs);
1717 #endif //ASSERT
1718
1719 do_one_bytecode();
1720 if (failing()) return;
1721
1722 assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth,
1723 "incorrect depth prediction: bc=%s bci=%d, sp=%d, pre_bc_sp=%d, depth=%d", Bytecodes::name(bc()), bci(), sp(), pre_bc_sp, depth);
1724
1725 do_exceptions();
1726
1727 NOT_PRODUCT( parse_histogram()->record_change(); );
1728
1729 if (log != nullptr)
1730 log->clear_context(); // skip marker if nothing was printed
1731
1732 // Fall into next bytecode. Each bytecode normally has 1 sequential
1733 // successor which is typically made ready by visiting this bytecode.
1734 // If the successor has several predecessors, then it is a merge
1735 // point, starts a new basic block, and is handled like other basic blocks.
1736 }
1737 }
1738
1739
1740 //------------------------------merge------------------------------------------
1741 void Parse::set_parse_bci(int bci) {
1742 set_bci(bci);
1743 Node_Notes* nn = C->default_node_notes();
2285
2286 Node* fast_io = call->in(TypeFunc::I_O);
2287 Node* fast_mem = call->in(TypeFunc::Memory);
  // These two phis are pre-filled with copies of the fast IO and Memory
2289 Node* io_phi = PhiNode::make(result_rgn, fast_io, Type::ABIO);
2290 Node* mem_phi = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
2291
2292 result_rgn->init_req(2, control());
2293 io_phi ->init_req(2, i_o());
2294 mem_phi ->init_req(2, reset_memory());
2295
2296 set_all_memory( _gvn.transform(mem_phi) );
2297 set_i_o( _gvn.transform(io_phi) );
2298 }
2299
2300 set_control( _gvn.transform(result_rgn) );
2301 }
2302
2303 // Add check to deoptimize once holder klass is fully initialized.
2304 void Parse::clinit_deopt() {
2305 if (method()->holder()->is_initialized()) {
2306 return; // in case do_clinit_barriers() is true
2307 }
2308 assert(C->has_method(), "only for normal compilations");
2309 assert(depth() == 1, "only for main compiled method");
2310 assert(is_normal_parse(), "no barrier needed on osr entry");
2311 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2312
2313 set_parse_bci(0);
2314
2315 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2316 guard_klass_being_initialized(holder);
2317 }
2318
2319 //------------------------------return_current---------------------------------
2320 // Append current _map to _exit_return
2321 void Parse::return_current(Node* value) {
2322 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2323 call_register_finalizer();
2324 }
2325
2326 // Do not set_parse_bci, so that return goo is credited to the return insn.
2327 set_bci(InvocationEntryBci);
|