401 }
402 if (dead->is_ParsePredicate()) {
403 remove_parse_predicate(dead->as_ParsePredicate());
404 }
405 if (dead->for_post_loop_opts_igvn()) {
406 remove_from_post_loop_opts_igvn(dead);
407 }
408 if (dead->for_merge_stores_igvn()) {
409 remove_from_merge_stores_igvn(dead);
410 }
411 if (dead->is_Call()) {
412 remove_useless_late_inlines(&_late_inlines, dead);
413 remove_useless_late_inlines(&_string_late_inlines, dead);
414 remove_useless_late_inlines(&_boxing_late_inlines, dead);
415 remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
416
417 if (dead->is_CallStaticJava()) {
418 remove_unstable_if_trap(dead->as_CallStaticJava(), false);
419 }
420 }
421 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
422 bs->unregister_potential_barrier_node(dead);
423 }
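// The cleanup above must purge the dying node from every side table that can
// still hold a pointer to it: the Parse Predicate list, the post-loop-opts and
// merge-stores IGVN worklists, the late-inline queues, and the unstable-if
// trap list. Otherwise a later pass could chase a dangling pointer into the
// freed node.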
424
425 // Disconnect all useless nodes by disconnecting those at the boundary.
426 void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist, const Unique_Node_List* root_and_safepoints) {
427 uint next = 0;
428 while (next < useful.size()) {
429 Node* n = useful.at(next++);
430 if (n->is_SafePoint()) {
431 // We're done with a parsing phase. Replaced nodes are not valid
432 // beyond that point.
433 n->as_SafePoint()->delete_replaced_nodes();
434 }
435 // Use raw traversal of out edges since this code removes out edges
436 int max = n->outcnt();
437 for (int j = 0; j < max; ++j) {
438 Node* child = n->raw_out(j);
439 if (!useful.member(child)) {
440 assert(!child->is_top() || child != top(),
441 "If top is cached in Compile object it is in useful list");
442 // Only need to remove this out-edge to the useless node
452 assert(useful.member(n->unique_out()), "do not push a useless node");
453 worklist.push(n->unique_out());
454 }
455 }
456
457 remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
458 remove_useless_nodes(_parse_predicates, useful); // remove useless Parse Predicate nodes
459 // Remove useless Template Assertion Predicate opaque nodes
460 remove_useless_nodes(_template_assertion_predicate_opaques, useful);
461 remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
462 remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless nodes recorded for the post loop opts IGVN pass
463 remove_useless_nodes(_for_merge_stores_igvn, useful); // remove useless nodes recorded for the merge stores IGVN pass
464 remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps
465 remove_useless_coarsened_locks(useful); // remove useless coarsened locks nodes
466 #ifdef ASSERT
467 if (_modified_nodes != nullptr) {
468 _modified_nodes->remove_useless_nodes(useful.member_set());
469 }
470 #endif
471
472 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
473 bs->eliminate_useless_gc_barriers(useful, this);
474 // clean up the late inline lists
475 remove_useless_late_inlines(&_late_inlines, useful);
476 remove_useless_late_inlines(&_string_late_inlines, useful);
477 remove_useless_late_inlines(&_boxing_late_inlines, useful);
478 remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
479 DEBUG_ONLY(verify_graph_edges(true /*check for no_dead_code*/, root_and_safepoints);)
480 }
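// The walk above never visits useless nodes directly: it iterates only over
// the useful set and snips each out-edge that crosses the useful/useless
// boundary, which is why it must use raw_out() while the out array is being
// mutated. A minimal sketch of the boundary walk (helper names hypothetical,
// not the real Node API):
//
//   for (Node* n : useful_set)             // nodes reachable from root
//     for (Node* use : raw_outputs_of(n))  // raw: the list shrinks under us
//       if (!useful_set.member(use))
//         drop_out_edge(n, use);           // remove only this one edge
//
// A node left with a single interesting user is pushed on the IGVN worklist
// (the unique_out() push above) so the simplified pattern gets re-examined.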
481
482 // ============================================================================
483 //------------------------------CompileWrapper---------------------------------
484 class CompileWrapper : public StackObj {
485 Compile* const _compile;
486 public:
487 CompileWrapper(Compile* compile);
488
489 ~CompileWrapper();
490 };
491
492 CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
493 // the Compile* pointer is stored in the current ciEnv:
852 #ifndef PRODUCT
853 if (should_print_igv(1)) {
854 _igv_printer->print_inlining();
855 }
856 #endif
857
858 if (failing()) return;
859 NOT_PRODUCT( verify_graph_edges(); )
860
861 // Now optimize
862 Optimize();
863 if (failing()) return;
864 NOT_PRODUCT( verify_graph_edges(); )
865
866 #ifndef PRODUCT
867 if (should_print_ideal()) {
868 print_ideal_ir("PrintIdeal");
869 }
870 #endif
871
872 #ifdef ASSERT
873 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
874 bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
875 #endif
876
877 // Dump compilation data to replay it.
878 if (directive->DumpReplayOption) {
879 env()->dump_replay_data(_compile_id);
880 }
881 if (directive->DumpInlineOption && (ilt() != nullptr)) {
882 env()->dump_inline_data(_compile_id);
883 }
884
885 // Now that we know the size of all the monitors we can add a fixed slot
886 // for the original deopt pc.
887 int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
888 set_fixed_slots(next_slot);
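// For example, on a 64-bit VM sizeof(address) == 8 and
// VMRegImpl::stack_slot_size == 4, so this reserves two extra 32-bit stack
// slots to hold the saved deopt pc.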
889
890 // Compute when to use implicit null checks. Used by matching trap-based
891 // nodes and NullCheck optimization.
892 set_allowed_deopt_reasons();
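// An implicit null check drops the explicit "p == nullptr" test and instead
// lets the memory access fault; the signal handler maps the faulting pc back
// to a deoptimization site. That is only safe for trap reasons that may still
// fire, hence the allowed-reasons set computed here.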
893
3226 break;
3227 }
3228 default:
3229 break;
3230 }
3231 }
3232
3233 #ifdef ASSERT
3234 if (n->is_Mem()) {
3235 int alias_idx = get_alias_index(n->as_Mem()->adr_type());
3236 assert(n->in(0) != nullptr || alias_idx != Compile::AliasIdxRaw ||
3237 // oop will be recorded in oop map if load crosses safepoint
3238 (n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
3239 LoadNode::is_immutable_value(n->in(MemNode::Address)))),
3240 "raw memory operations should have control edge");
3241 }
3242 if (n->is_MemBar()) {
3243 MemBarNode* mb = n->as_MemBar();
3244 if (mb->trailing_store() || mb->trailing_load_store()) {
3245 assert(mb->leading_membar()->trailing_membar() == mb, "bad membar pair");
3246 Node* mem = BarrierSet::barrier_set()->barrier_set_c2()->step_over_gc_barrier(mb->in(MemBarNode::Precedent));
3247 assert((mb->trailing_store() && mem->is_Store() && mem->as_Store()->is_release()) ||
3248 (mb->trailing_load_store() && mem->is_LoadStore()), "missing mem op");
3249 } else if (mb->leading()) {
3250 assert(mb->trailing_membar()->leading_membar() == mb, "bad membar pair");
3251 }
3252 }
3253 #endif
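// Leading/trailing membar pairs bracket a single memory access: a volatile
// store, for example, is emitted as a leading release barrier plus the store
// plus a trailing barrier, tied together through the Precedent edge checked
// above. The asserts confirm that these links are still mutually consistent
// after all optimizations have run.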
3254 // Count FPU ops and common calls, implements item (3)
3255 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->final_graph_reshaping(this, n, nop, dead_nodes);
3256 if (!gc_handled) {
3257 final_graph_reshaping_main_switch(n, frc, nop, dead_nodes);
3258 }
3259
3260 // Collect CFG split points
3261 if (n->is_MultiBranch() && !n->is_RangeCheck()) {
3262 frc._tests.push(n);
3263 }
3264 }
3265
3266 void Compile::handle_div_mod_op(Node* n, BasicType bt, bool is_unsigned) {
3267 if (!UseDivMod) {
3268 return;
3269 }
3270
3271 // Check if "a % b" and "a / b" both exist
3272 Node* d = n->find_similar(Op_DivIL(bt, is_unsigned));
3273 if (d == nullptr) {
3274 return;
3275 }
3276
3277 // Replace them with a fused divmod if supported
3278 if (Matcher::has_match_rule(Op_DivModIL(bt, is_unsigned))) {
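// On targets whose divide instruction produces both results at once (x86's
// idiv, for instance, leaves the quotient in rax and the remainder in rdx), a
// fused DivMod node lets "a / b" and "a % b" share one instruction. A sketch
// of the replacement, assuming C2's DivModNode factory and its
// quotient/remainder projections (n is the mod node handled here, d the
// matching division found above):
//
//   DivModNode* divmod = DivModNode::make(n, bt, is_unsigned);
//   d->subsume_by(divmod->div_proj(), this);  // quotient
//   n->subsume_by(divmod->mod_proj(), this);  // remainder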
401 }
402 if (dead->is_ParsePredicate()) {
403 remove_parse_predicate(dead->as_ParsePredicate());
404 }
405 if (dead->for_post_loop_opts_igvn()) {
406 remove_from_post_loop_opts_igvn(dead);
407 }
408 if (dead->for_merge_stores_igvn()) {
409 remove_from_merge_stores_igvn(dead);
410 }
411 if (dead->is_Call()) {
412 remove_useless_late_inlines(&_late_inlines, dead);
413 remove_useless_late_inlines(&_string_late_inlines, dead);
414 remove_useless_late_inlines(&_boxing_late_inlines, dead);
415 remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
416
417 if (dead->is_CallStaticJava()) {
418 remove_unstable_if_trap(dead->as_CallStaticJava(), false);
419 }
420 }
421 }
422
423 // Disconnect all useless nodes by disconnecting those at the boundary.
424 void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist, const Unique_Node_List* root_and_safepoints) {
425 uint next = 0;
426 while (next < useful.size()) {
427 Node* n = useful.at(next++);
428 if (n->is_SafePoint()) {
429 // We're done with a parsing phase. Replaced nodes are not valid
430 // beyond that point.
431 n->as_SafePoint()->delete_replaced_nodes();
432 }
433 // Use raw traversal of out edges since this code removes out edges
434 int max = n->outcnt();
435 for (int j = 0; j < max; ++j) {
436 Node* child = n->raw_out(j);
437 if (!useful.member(child)) {
438 assert(!child->is_top() || child != top(),
439 "If top is cached in Compile object it is in useful list");
440 // Only need to remove this out-edge to the useless node
450 assert(useful.member(n->unique_out()), "do not push a useless node");
451 worklist.push(n->unique_out());
452 }
453 }
454
455 remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
456 remove_useless_nodes(_parse_predicates, useful); // remove useless Parse Predicate nodes
457 // Remove useless Template Assertion Predicate opaque nodes
458 remove_useless_nodes(_template_assertion_predicate_opaques, useful);
459 remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
460 remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless nodes recorded for the post loop opts IGVN pass
461 remove_useless_nodes(_for_merge_stores_igvn, useful); // remove useless nodes recorded for the merge stores IGVN pass
462 remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps
463 remove_useless_coarsened_locks(useful); // remove useless coarsened locks nodes
464 #ifdef ASSERT
465 if (_modified_nodes != nullptr) {
466 _modified_nodes->remove_useless_nodes(useful.member_set());
467 }
468 #endif
469
470 // clean up the late inline lists
471 remove_useless_late_inlines(&_late_inlines, useful);
472 remove_useless_late_inlines(&_string_late_inlines, useful);
473 remove_useless_late_inlines(&_boxing_late_inlines, useful);
474 remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
475 DEBUG_ONLY(verify_graph_edges(true /*check for no_dead_code*/, root_and_safepoints);)
476 }
477
478 // ============================================================================
479 //------------------------------CompileWrapper---------------------------------
480 class CompileWrapper : public StackObj {
481 Compile* const _compile;
482 public:
483 CompileWrapper(Compile* compile);
484
485 ~CompileWrapper();
486 };
487
488 CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
489 // the Compile* pointer is stored in the current ciEnv:
848 #ifndef PRODUCT
849 if (should_print_igv(1)) {
850 _igv_printer->print_inlining();
851 }
852 #endif
853
854 if (failing()) return;
855 NOT_PRODUCT( verify_graph_edges(); )
856
857 // Now optimize
858 Optimize();
859 if (failing()) return;
860 NOT_PRODUCT( verify_graph_edges(); )
861
862 #ifndef PRODUCT
863 if (should_print_ideal()) {
864 print_ideal_ir("PrintIdeal");
865 }
866 #endif
867
868 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
869
870 #ifdef ASSERT
871 bs->verify_gc_barriers(this, BarrierSetC2::AfterOptimize);
872 #endif
873
874 bs->final_refinement(this);
875
876 #ifdef ASSERT
877 bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
878 #endif
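// Barrier handling is sandwiched between two verification points: the
// AfterOptimize check validates the shape the optimizer left behind,
// final_refinement then gives the GC-specific backend a last pass over its
// barrier nodes, and the BeforeCodeGen check re-verifies the graph before
// matching starts.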
879
880 // Dump compilation data to replay it.
881 if (directive->DumpReplayOption) {
882 env()->dump_replay_data(_compile_id);
883 }
884 if (directive->DumpInlineOption && (ilt() != nullptr)) {
885 env()->dump_inline_data(_compile_id);
886 }
887
888 // Now that we know the size of all the monitors we can add a fixed slot
889 // for the original deopt pc.
890 int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
891 set_fixed_slots(next_slot);
892
893 // Compute when to use implicit null checks. Used by matching trap-based
894 // nodes and NullCheck optimization.
895 set_allowed_deopt_reasons();
896
3229 break;
3230 }
3231 default:
3232 break;
3233 }
3234 }
3235
3236 #ifdef ASSERT
3237 if (n->is_Mem()) {
3238 int alias_idx = get_alias_index(n->as_Mem()->adr_type());
3239 assert(n->in(0) != nullptr || alias_idx != Compile::AliasIdxRaw ||
3240 // oop will be recorded in oop map if load crosses safepoint
3241 (n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
3242 LoadNode::is_immutable_value(n->in(MemNode::Address)))),
3243 "raw memory operations should have control edge");
3244 }
3245 if (n->is_MemBar()) {
3246 MemBarNode* mb = n->as_MemBar();
3247 if (mb->trailing_store() || mb->trailing_load_store()) {
3248 assert(mb->leading_membar()->trailing_membar() == mb, "bad membar pair");
3249 Node* mem = mb->in(MemBarNode::Precedent);
3250 assert((mb->trailing_store() && mem->is_Store() && mem->as_Store()->is_release()) ||
3251 (mb->trailing_load_store() && mem->is_LoadStore()), "missing mem op");
3252 } else if (mb->leading()) {
3253 assert(mb->trailing_membar()->leading_membar() == mb, "bad membar pair");
3254 }
3255 }
3256 #endif
3257 // Count FPU ops and common calls, implements item (3)
3258 final_graph_reshaping_main_switch(n, frc, nop, dead_nodes);
3259
3260 // Collect CFG split points
3261 if (n->is_MultiBranch() && !n->is_RangeCheck()) {
3262 frc._tests.push(n);
3263 }
3264 }
3265
3266 void Compile::handle_div_mod_op(Node* n, BasicType bt, bool is_unsigned) {
3267 if (!UseDivMod) {
3268 return;
3269 }
3270
3271 // Check if "a % b" and "a / b" both exist
3272 Node* d = n->find_similar(Op_DivIL(bt, is_unsigned));
3273 if (d == nullptr) {
3274 return;
3275 }
3276
3277 // Replace them with a fused divmod if supported
3278 if (Matcher::has_match_rule(Op_DivModIL(bt, is_unsigned))) {