
src/hotspot/share/c1/c1_LinearScan.cpp

--- old/src/hotspot/share/c1/c1_LinearScan.cpp

1223   switch (op->code()) {
1224     case lir_move:      // fall through
1225     case lir_convert: {
1226       assert(op->as_Op1() != NULL, "lir_move, lir_convert must be LIR_Op1");
1227       LIR_Op1* move = (LIR_Op1*)op;
1228 
1229       LIR_Opr move_from = move->in_opr();
1230       LIR_Opr move_to = move->result_opr();
1231 
1232       if (move_to->is_register() && move_from->is_register()) {
1233         Interval* from = interval_at(reg_num(move_from));
1234         Interval* to = interval_at(reg_num(move_to));
1235         if (from != NULL && to != NULL) {
1236           to->set_register_hint(from);
1237           TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", move->id(), from->reg_num(), to->reg_num()));
1238         }
1239       }
1240       break;
1241     }
1242     case lir_cmove: {
1243       assert(op->as_Op2() != NULL, "lir_cmove must be LIR_Op2");
1244       LIR_Op2* cmove = (LIR_Op2*)op;
1245 
1246       LIR_Opr move_from = cmove->in_opr1();
1247       LIR_Opr move_to = cmove->result_opr();
1248 
1249       if (move_to->is_register() && move_from->is_register()) {
1250         Interval* from = interval_at(reg_num(move_from));
1251         Interval* to = interval_at(reg_num(move_to));
1252         if (from != NULL && to != NULL) {
1253           to->set_register_hint(from);
1254           TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", cmove->id(), from->reg_num(), to->reg_num()));
1255         }
1256       }
1257       break;
1258     }
1259     default:
1260       break;
1261   }
1262 }
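
The hint recorded above is consumed later, when the allocator picks a physical register for the destination interval: if the hinted source interval already sits in a register that is still free, choosing the same one turns the move into a no-op. A minimal sketch of that preference, using simplified stand-in types (the Interval fields, pick_register and the free-set below are invented for illustration, not the HotSpot API):

    #include <optional>

    // Invented, simplified stand-in for illustration only.
    struct Interval {
      int reg_num;                       // virtual register this interval represents
      std::optional<int> assigned_reg;   // physical register, once allocated
      Interval* register_hint = nullptr; // set by the hint-collection code above
    };

    // Prefer the hinted register if it is still available.
    int pick_register(const Interval& it, const bool free[], int num_regs) {
      if (it.register_hint != nullptr && it.register_hint->assigned_reg.has_value()) {
        int hinted = *it.register_hint->assigned_reg;
        if (free[hinted]) {
          return hinted; // source and destination share a register -> move can be dropped
        }
      }
      for (int r = 0; r < num_regs; r++) { // otherwise take the first free register
        if (free[r]) return r;
      }
      return -1; // no register free; the caller would have to spill
    }
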
1263 
1264 
1265 void LinearScan::build_intervals() {
1266   TIME_LINEAR_SCAN(timer_build_intervals);
1267 

3114 
3115   sort_intervals_after_allocation();
3116 
3117   DEBUG_ONLY(verify());
3118 
3119   eliminate_spill_moves();
3120   assign_reg_num();
3121   CHECK_BAILOUT();
3122 
3123   NOT_PRODUCT(print_lir(2, "LIR after assignment of register numbers:"));
3124   NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_after_asign));
3125 
3126   { TIME_LINEAR_SCAN(timer_allocate_fpu_stack);
3127 
3128     if (use_fpu_stack_allocation()) {
3129       allocate_fpu_stack(); // Only has effect on Intel
3130       NOT_PRODUCT(print_lir(2, "LIR after FPU stack allocation:"));
3131     }
3132   }
3133 



3134   { TIME_LINEAR_SCAN(timer_optimize_lir);
3135 
3136     EdgeMoveOptimizer::optimize(ir()->code());
3137     ControlFlowOptimizer::optimize(ir()->code());
3138     // check that cfg is still correct after optimizations
3139     ir()->verify();
3140   }

3141 
3142   NOT_PRODUCT(print_lir(1, "Before Code Generation", false));
3143   NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_final));
3144   NOT_PRODUCT(_total_timer.end_method(this));
3145 }
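
Most of the bookkeeping in this function is conditionally compiled: DEBUG_ONLY(verify()) only runs in debug builds, and the NOT_PRODUCT wrappers drop the tracing and statistics from product builds entirely. The pattern behind these wrappers is sketched below from memory of HotSpot's macro conventions; the exact definitions live in utilities/macros.hpp and may differ in detail:

    // Sketch of the conditional-compilation pattern behind the wrappers.
    // In a product build the argument disappears, so print_lir() and the
    // LinearScanStatistic calls cost nothing at runtime.
    #ifdef PRODUCT
      #define NOT_PRODUCT(code)
    #else
      #define NOT_PRODUCT(code) code
    #endif

    // DEBUG_ONLY keeps its argument only when asserts are compiled in.
    #ifdef ASSERT
      #define DEBUG_ONLY(code) code
    #else
      #define DEBUG_ONLY(code)
    #endif
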
3146 
3147 
3148 // ********** Printing functions
3149 
3150 #ifndef PRODUCT
3151 
3152 void LinearScan::print_timers(double total) {
3153   _total_timer.print(total);
3154 }
3155 
3156 void LinearScan::print_statistics() {
3157   _stat_before_alloc.print("before allocation");
3158   _stat_after_asign.print("after assignment of register");
3159   _stat_final.print("after optimization");
3160 }

6344       if (last_branch->info() == NULL) {
6345         if (last_branch->block() == code->at(i + 1)) {
6346 
6347           TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting unconditional branch at end of block B%d", block->block_id()));
6348 
6349           // delete last branch instruction
6350           instructions->trunc_to(instructions->length() - 1);
6351 
6352         } else {
6353           LIR_Op* prev_op = instructions->at(instructions->length() - 2);
6354           if (prev_op->code() == lir_branch || prev_op->code() == lir_cond_float_branch) {
6355             assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6356             LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
6357 
6358             if (prev_branch->stub() == NULL) {
6359 
6360               LIR_Op2* prev_cmp = NULL;
6361               // There might be a cmove inserted for profiling which depends on the same
6362               // compare. If we change the condition of the respective compare, we have
6363               // to take care of this cmove as well.
6364               LIR_Op2* prev_cmove = NULL;
6365 
6367               for (int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
6367                 prev_op = instructions->at(j);
6368                 // check for the cmove
6369                 if (prev_op->code() == lir_cmove) {
6370                   assert(prev_op->as_Op2() != NULL, "cmove must be of type LIR_Op2");
6371                   prev_cmove = (LIR_Op2*)prev_op;
6372                   assert(prev_branch->cond() == prev_cmove->condition(), "should be the same");
6373                 }
6374                 if (prev_op->code() == lir_cmp) {
6375                   assert(prev_op->as_Op2() != NULL, "cmp must be of type LIR_Op2");
6376                   prev_cmp = (LIR_Op2*)prev_op;
6377                   assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
6378                 }
6379               }
6380               // Guarantee because it is dereferenced below.
6381               guarantee(prev_cmp != NULL, "should have found comp instruction for branch");
6382               if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
6383 
6384                 TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
6385 
6386                 // eliminate a conditional branch to the immediate successor
6387                 prev_branch->change_block(last_branch->block());
6388                 prev_branch->negate_cond();
6389                 prev_cmp->set_condition(prev_branch->cond());
6390                 instructions->trunc_to(instructions->length() - 1);
6391                 // if we do change the condition, we have to change the cmove as well

+++ new/src/hotspot/share/c1/c1_LinearScan.cpp

1223   switch (op->code()) {
1224     case lir_move:      // fall through
1225     case lir_convert: {
1226       assert(op->as_Op1() != NULL, "lir_move, lir_convert must be LIR_Op1");
1227       LIR_Op1* move = (LIR_Op1*)op;
1228 
1229       LIR_Opr move_from = move->in_opr();
1230       LIR_Opr move_to = move->result_opr();
1231 
1232       if (move_to->is_register() && move_from->is_register()) {
1233         Interval* from = interval_at(reg_num(move_from));
1234         Interval* to = interval_at(reg_num(move_to));
1235         if (from != NULL && to != NULL) {
1236           to->set_register_hint(from);
1237           TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", move->id(), from->reg_num(), to->reg_num()));
1238         }
1239       }
1240       break;
1241     }
1242     case lir_cmove: {
1243       assert(op->as_Op4() != NULL, "lir_cmove must be LIR_Op4");
1244       LIR_Op4* cmove = (LIR_Op4*)op;
1245 
1246       LIR_Opr move_from = cmove->in_opr1();
1247       LIR_Opr move_to   = cmove->result_opr();
1248 
1249       if (move_to->is_register() && move_from->is_register()) {
1250         Interval* from = interval_at(reg_num(move_from));
1251         Interval* to = interval_at(reg_num(move_to));
1252         if (from != NULL && to != NULL) {
1253           to->set_register_hint(from);
1254           TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", cmove->id(), from->reg_num(), to->reg_num()));
1255         }
1256       }
1257       break;
1258     }
1259     default:
1260       break;
1261   }
1262 }
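
Compared with the LIR_Op2 shape in the old version, the cmove is now a LIR_Op4, i.e. it carries four inputs. That matches a target without condition flags: the comparison operands must travel with the conditional move instead of being consumed implicitly from an earlier lir_cmp. A rough sketch of such a four-input operation, assuming the extra operands hold the comparison inputs (the field order here is invented; the hunk only shows that in_opr1() still names the value being moved):

    // Illustrative only: a four-input conditional move for a flag-less target.
    struct CMove4 {
      int cmp_left;       // first comparison operand
      int cmp_right;      // second comparison operand
      int value_if_true;  // selected when the condition holds
      int value_if_false; // selected otherwise
    };

    enum Cond { LT, GE, EQ, NE };

    int eval_cmove(const CMove4& op, Cond cond) {
      bool taken;
      switch (cond) {
        case LT: taken = op.cmp_left <  op.cmp_right; break;
        case GE: taken = op.cmp_left >= op.cmp_right; break;
        case EQ: taken = op.cmp_left == op.cmp_right; break;
        default: taken = op.cmp_left != op.cmp_right; break;
      }
      return taken ? op.value_if_true : op.value_if_false;
    }
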
1263 
1264 
1265 void LinearScan::build_intervals() {
1266   TIME_LINEAR_SCAN(timer_build_intervals);
1267 

3114 
3115   sort_intervals_after_allocation();
3116 
3117   DEBUG_ONLY(verify());
3118 
3119   eliminate_spill_moves();
3120   assign_reg_num();
3121   CHECK_BAILOUT();
3122 
3123   NOT_PRODUCT(print_lir(2, "LIR after assignment of register numbers:"));
3124   NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_after_asign));
3125 
3126   { TIME_LINEAR_SCAN(timer_allocate_fpu_stack);
3127 
3128     if (use_fpu_stack_allocation()) {
3129       allocate_fpu_stack(); // Only has effect on Intel
3130       NOT_PRODUCT(print_lir(2, "LIR after FPU stack allocation:"));
3131     }
3132   }
3133 
3134 #ifndef RISCV
3135   // Disable these optimizations on riscv temporarily, because they do not
3136   // work when the comparison operands are bound to branches or cmoves.
3137   { TIME_LINEAR_SCAN(timer_optimize_lir);
3138 
3139     EdgeMoveOptimizer::optimize(ir()->code());
3140     ControlFlowOptimizer::optimize(ir()->code());
3141     // check that cfg is still correct after optimizations
3142     ir()->verify();
3143   }
3144 #endif
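
Both passes assume they can locate a free-standing lir_cmp feeding each branch and rewrite its condition independently; delete_unnecessary_jumps further below even guarantees that it finds one. On a target where the comparison operands are folded into the branch or cmove itself, that separate compare does not exist, so the passes are skipped here rather than taught the fused form. A toy version of the structural precondition such a pass would have to check (the Op type is invented for the example):

    // Invented for illustration: a LIR-like op that may carry its own
    // comparison operands, as branches and cmoves do on RISC-V.
    struct Op {
      int  code;              // opcode
      int  in1, in2;          // data operands
      bool has_embedded_cmp;  // true for a fused compare-and-branch / cmove
      bool operator==(const Op& other) const {
        return code == other.code && in1 == other.in1 && in2 == other.in2;
      }
    };

    // Two textually identical tail instructions may only be moved across a
    // block edge if neither one also encodes the block's control flow.
    bool can_hoist_common_tail(const Op& a, const Op& b) {
      return a == b && !a.has_embedded_cmp && !b.has_embedded_cmp;
    }
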
3145 
3146   NOT_PRODUCT(print_lir(1, "Before Code Generation", false));
3147   NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_final));
3148   NOT_PRODUCT(_total_timer.end_method(this));
3149 }
3150 
3151 
3152 // ********** Printing functions
3153 
3154 #ifndef PRODUCT
3155 
3156 void LinearScan::print_timers(double total) {
3157   _total_timer.print(total);
3158 }
3159 
3160 void LinearScan::print_statistics() {
3161   _stat_before_alloc.print("before allocation");
3162   _stat_after_asign.print("after assignment of register");
3163   _stat_final.print("after optimization");
3164 }

6348       if (last_branch->info() == NULL) {
6349         if (last_branch->block() == code->at(i + 1)) {
6350 
6351           TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting unconditional branch at end of block B%d", block->block_id()));
6352 
6353           // delete last branch instruction
6354           instructions->trunc_to(instructions->length() - 1);
6355 
6356         } else {
6357           LIR_Op* prev_op = instructions->at(instructions->length() - 2);
6358           if (prev_op->code() == lir_branch || prev_op->code() == lir_cond_float_branch) {
6359             assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6360             LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
6361 
6362             if (prev_branch->stub() == NULL) {
6363 
6364               LIR_Op2* prev_cmp = NULL;
6365               // There might be a cmove inserted for profiling which depends on the same
6366               // compare. If we change the condition of the respective compare, we have
6367               // to take care of this cmove as well.
6368               LIR_Op4* prev_cmove = NULL;
6369 
6370               for (int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
6371                 prev_op = instructions->at(j);
6372                 // check for the cmove
6373                 if (prev_op->code() == lir_cmove) {
6374                   assert(prev_op->as_Op4() != NULL, "cmove must be of type LIR_Op4");
6375                   prev_cmove = (LIR_Op4*)prev_op;
6376                   assert(prev_branch->cond() == prev_cmove->condition(), "should be the same");
6377                 }
6378                 if (prev_op->code() == lir_cmp) {
6379                   assert(prev_op->as_Op2() != NULL, "cmp must be of type LIR_Op2");
6380                   prev_cmp = (LIR_Op2*)prev_op;
6381                   assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
6382                 }
6383               }
6384               // Guarantee because it is dereferenced below.
6385               guarantee(prev_cmp != NULL, "should have found comp instruction for branch");
6386               if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
6387 
6388                 TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
6389 
6390                 // eliminate a conditional branch to the immediate successor
6391                 prev_branch->change_block(last_branch->block());
6392                 prev_branch->negate_cond();
6393                 prev_cmp->set_condition(prev_branch->cond());
6394                 instructions->trunc_to(instructions->length() - 1);
6395                 // if we do change the condition, we have to change the cmove as well
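
The transformation in this hunk is the classic jump-threading cleanup: a block ending in "if (cond) goto NEXT; goto OTHER;", where NEXT is the fall-through successor, becomes "if (!cond) goto OTHER;". Because the condition is duplicated on the lir_cmp (and, when present, on a profiling cmove), those copies are rewritten to match the negated branch. A minimal sketch over an invented three-op block tail (Op, Cond and negate are stand-ins, not LIR types; the real code also searches backwards for the cmp instead of assuming it is adjacent):

    #include <cassert>
    #include <vector>

    enum Cond { LT, GE, EQ, NE };

    Cond negate(Cond c) {
      switch (c) {
        case LT: return GE;
        case GE: return LT;
        case EQ: return NE;
        default: return EQ;
      }
    }

    struct Op {
      enum Kind { Cmp, CondBranch, Goto } kind;
      Cond cond;   // meaningful for Cmp and CondBranch
      int  target; // meaningful for CondBranch and Goto
    };

    // Tail "cmp; if (cond) goto next_block; goto other;" becomes
    // "cmp; if (!cond) goto other;" -- the unconditional branch dies.
    void delete_unnecessary_jump(std::vector<Op>& ops, int next_block) {
      int n = (int)ops.size();
      assert(n >= 3 && ops[n - 1].kind == Op::Goto && ops[n - 2].kind == Op::CondBranch);
      Op& last = ops[n - 1];
      Op& br   = ops[n - 2];
      Op& cmp  = ops[n - 3];
      if (br.target == next_block) {
        br.cond   = negate(br.cond); // invert the test...
        br.target = last.target;     // ...retarget it at the goto's destination...
        cmp.cond  = br.cond;         // ...keep the compare's condition in sync...
        ops.pop_back();              // ...and drop the unconditional branch
      }
    }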