
src/hotspot/share/opto/compile.cpp


*** 80,89 ****
--- 80,92 ----
  #include "gc/g1/g1ThreadLocalData.hpp"
  #endif // INCLUDE_G1GC
  #if INCLUDE_ZGC
  #include "gc/z/c2/zBarrierSetC2.hpp"
  #endif
+ #if INCLUDE_SHENANDOAHGC
+ #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+ #endif
  
  
  // -------------------- Compile::mach_constant_base_node -----------------------
  // Constant table base node singleton.
  MachConstantBaseNode* Compile::mach_constant_base_node() {

*** 389,398 ****
--- 392,409 ----
        }
      }
      if (n->outcnt() == 1 && n->has_special_unique_user()) {
        record_for_igvn(n->unique_out());
      }
+ #if INCLUDE_SHENANDOAHGC
+     // TODO: Move into eliminate_useless_gc_barriers(..) below
+     if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
+       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+         record_for_igvn(n->fast_out(i));
+       }
+     }
+ #endif
    }
    // Remove useless macro and predicate opaq nodes
    for (int i = C->macro_count()-1; i >= 0; i--) {
      Node* n = C->macro_node(i);
      if (!useful.member(n)) {

*** 643,653 ****
    _save_argument_registers(false),
    _stub_name(NULL),
    _stub_function(NULL),
    _stub_entry_point(NULL),
    _method(target),
-   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
    _entry_bci(osr_bci),
    _initial_gvn(NULL),
    _for_igvn(NULL),
    _warm_calls(NULL),
    _subsume_loads(subsume_loads),
--- 654,663 ----

*** 674,683 ****
--- 684,694 ----
    #endif
    _congraph(NULL),
    _comp_arena(mtCompiler),
    _node_arena(mtCompiler),
    _old_arena(mtCompiler),
+   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
    _Compile_types(mtCompiler),
    _replay_inline_data(NULL),
    _late_inlines(comp_arena(), 2, 0, NULL),
    _string_late_inlines(comp_arena(), 2, 0, NULL),
    _boxing_late_inlines(comp_arena(), 2, 0, NULL),

*** 1454,1463 ****
--- 1465,1479 ----
        ptr = TypePtr::BotPTR;
      } else if( offset == oopDesc::mark_offset_in_bytes() ) {
        tj = TypeInstPtr::MARK;
        ta = TypeAryPtr::RANGE; // generic ignored junk
        ptr = TypePtr::BotPTR;
+ #if INCLUDE_SHENANDOAHGC
+     } else if (offset == ShenandoahBrooksPointer::byte_offset() && UseShenandoahGC) {
+       // Need to distinguish brooks ptr as is.
+       tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
+ #endif
      } else {                    // Random constant offset into array body
        offset = Type::OffsetBot;  // Flatten constant access into array body
        tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
      }
    }

*** 1518,1528 ****
      // First handle header references such as a LoadKlassNode, even if the
      // object's klass is unloaded at compile time (4965979).
      if (!is_known_inst) { // Do it only for non-instance types
        tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
      }
!   } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
      // Static fields are in the space above the normal instance
      // fields in the java.lang.Class instance.
      if (to->klass() != ciEnv::current()->Class_klass()) {
        to = NULL;
        tj = TypeOopPtr::BOTTOM;
--- 1534,1544 ----
      // First handle header references such as a LoadKlassNode, even if the
      // object's klass is unloaded at compile time (4965979).
      if (!is_known_inst) { // Do it only for non-instance types
        tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
      }
!   } else if (SHENANDOAHGC_ONLY((offset != ShenandoahBrooksPointer::byte_offset() || !UseShenandoahGC) &&) (offset < 0 || offset >= k->size_helper() * wordSize)) {
      // Static fields are in the space above the normal instance
      // fields in the java.lang.Class instance.
      if (to->klass() != ciEnv::current()->Class_klass()) {
        to = NULL;
        tj = TypeOopPtr::BOTTOM;

*** 1616,1626 ****
            (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
            (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
            (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
            (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
            (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
!           (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr) ,
            "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
    assert( tj->ptr() != TypePtr::TopPTR &&
            tj->ptr() != TypePtr::AnyNull &&
            tj->ptr() != TypePtr::Null, "No imprecise addresses" );
  //    assert( tj->ptr() != TypePtr::Constant ||
--- 1632,1643 ----
            (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
            (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
            (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
            (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
            (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
!           (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr) ||
!           (UseShenandoahGC SHENANDOAHGC_ONLY(&& offset == ShenandoahBrooksPointer::byte_offset() && tj->base() == Type::AryPtr)),
            "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
    assert( tj->ptr() != TypePtr::TopPTR &&
            tj->ptr() != TypePtr::AnyNull &&
            tj->ptr() != TypePtr::Null, "No imprecise addresses" );
  //    assert( tj->ptr() != TypePtr::Constant ||

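Note: the SHENANDOAHGC_ONLY(..) wrapper used in the two hunks above is not defined in this file. It presumably follows the existing per-GC conditional-compilation macros in utilities/macros.hpp (G1GC_ONLY, ZGC_ONLY, and friends); a minimal sketch of that assumed pattern:

    #if INCLUDE_SHENANDOAHGC
    #define SHENANDOAHGC_ONLY(code) code    // keep the guarded code in Shenandoah-enabled builds
    #define NOT_SHENANDOAHGC(code)
    #else
    #define SHENANDOAHGC_ONLY(code)         // compile the guarded code away entirely
    #define NOT_SHENANDOAHGC(code) code
    #endif

With a definition of this shape, the Brooks-pointer clauses above disappear from builds configured without Shenandoah, while the UseShenandoahGC runtime flag still gates them in builds that include it.
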
*** 2102,2112 ****
        if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
          TracePhase tp("incrementalInline_ideal", &timers[_t_incrInline_ideal]);
          // PhaseIdealLoop is expensive so we only try it once we are
          // out of live nodes and we only try it again if the previous
          // helped got the number of nodes down significantly
!         PhaseIdealLoop ideal_loop( igvn, false, true );
          if (failing()) return;
          low_live_nodes = live_nodes();
          _major_progress = true;
        }
  
--- 2119,2129 ----
        if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
          TracePhase tp("incrementalInline_ideal", &timers[_t_incrInline_ideal]);
          // PhaseIdealLoop is expensive so we only try it once we are
          // out of live nodes and we only try it again if the previous
          // helped got the number of nodes down significantly
!         PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
          if (failing()) return;
          low_live_nodes = live_nodes();
          _major_progress = true;
        }
  

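The bool-flag PhaseIdealLoop constructor (do_split_ifs, skip_loop_opts) is replaced throughout this file by a single LoopOptsMode argument. The enum itself is declared in opto/loopnode.hpp; a sketch of the values this diff relies on (the real enum may contain additional modes):

    enum LoopOptsMode {
      LoopOptsDefault,      // full loop optimizations, including split-if
      LoopOptsNone,         // build the loop tree and clean up the graph, no transformations
      LoopOptsSkipSplitIf   // loop optimizations, but without split-if
      // further modes may be defined in loopnode.hpp
    };

In the hunks that follow, the old (igvn, false, true) calls map to LoopOptsNone, (igvn, true) to LoopOptsDefault, and (igvn, false) to LoopOptsSkipSplitIf.
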
*** 2153,2162 ****
--- 2170,2194 ----
    set_inlining_incrementally(false);
  }
  
  
+ bool Compile::optimize_loops(int& loop_opts_cnt, PhaseIterGVN& igvn, LoopOptsMode mode) {
+   if(loop_opts_cnt > 0) {
+     debug_only( int cnt = 0; );
+     while(major_progress() && (loop_opts_cnt > 0)) {
+       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
+       assert( cnt++ < 40, "infinite cycle in loop optimization" );
+       PhaseIdealLoop ideal_loop(igvn, mode);
+       loop_opts_cnt--;
+       if (failing()) return false;
+       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
+     }
+   }
+   return true;
+ }
+ 
  //------------------------------Optimize---------------------------------------
  // Given a graph, optimize it.
  void Compile::Optimize() {
    TracePhase tp("optimizer", &timers[_t_optimizer]);

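The new Compile::optimize_loops helper factors the bounded "run PhaseIdealLoop while it makes progress" loop out of Optimize() so it can be reused with different modes (see the later hunk that rewrites the original inline loop as a call to it). Its declaration is assumed to be added to opto/compile.hpp in a companion hunk of this change, roughly:

    // Run the loop optimizer up to loop_opts_cnt more times while progress is made;
    // returns false if the compilation bailed out along the way.
    bool optimize_loops(int& loop_opts_cnt, PhaseIterGVN& igvn, LoopOptsMode mode);
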
*** 2191,2204 ****
    {
      TracePhase tp("iterGVN", &timers[_t_iterGVN]);
      igvn.optimize();
    }
  
-   print_method(PHASE_ITER_GVN1, 2);
- 
    if (failing()) return;
  
    inline_incrementally(igvn);
  
    print_method(PHASE_INCREMENTAL_INLINE, 2);
  
    if (failing()) return;
--- 2223,2236 ----
    {
      TracePhase tp("iterGVN", &timers[_t_iterGVN]);
      igvn.optimize();
    }
  
    if (failing()) return;
  
+   print_method(PHASE_ITER_GVN1, 2);
+ 
    inline_incrementally(igvn);
  
    print_method(PHASE_INCREMENTAL_INLINE, 2);
  
    if (failing()) return;

*** 2243,2253 ****
    // Perform escape analysis
    if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
      if (has_loops()) {
        // Cleanup graph (remove dead nodes).
        TracePhase tp("idealLoop", &timers[_t_idealLoop]);
!       PhaseIdealLoop ideal_loop( igvn, false, true );
        if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
        if (failing()) return;
      }
      ConnectionGraph::do_analysis(this, &igvn);
  
--- 2275,2285 ----
    // Perform escape analysis
    if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
      if (has_loops()) {
        // Cleanup graph (remove dead nodes).
        TracePhase tp("idealLoop", &timers[_t_idealLoop]);
!       PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
        if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
        if (failing()) return;
      }
      ConnectionGraph::do_analysis(this, &igvn);
  

*** 2278,2304 ****
    // Set loop opts counter
    loop_opts_cnt = num_loop_opts();
    if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
      {
        TracePhase tp("idealLoop", &timers[_t_idealLoop]);
!       PhaseIdealLoop ideal_loop( igvn, true );
        loop_opts_cnt--;
        if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
        if (failing()) return;
      }
      // Loop opts pass if partial peeling occurred in previous pass
      if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
        TracePhase tp("idealLoop", &timers[_t_idealLoop]);
!       PhaseIdealLoop ideal_loop( igvn, false );
        loop_opts_cnt--;
        if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
        if (failing()) return;
      }
      // Loop opts pass for loop-unrolling before CCP
      if(major_progress() && (loop_opts_cnt > 0)) {
        TracePhase tp("idealLoop", &timers[_t_idealLoop]);
!       PhaseIdealLoop ideal_loop( igvn, false );
        loop_opts_cnt--;
        if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
      }
      if (!failing()) {
        // Verify that last round of loop opts produced a valid graph
--- 2310,2336 ----
    // Set loop opts counter
    loop_opts_cnt = num_loop_opts();
    if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
      {
        TracePhase tp("idealLoop", &timers[_t_idealLoop]);
!       PhaseIdealLoop ideal_loop(igvn, LoopOptsDefault);
        loop_opts_cnt--;
        if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
        if (failing()) return;
      }
      // Loop opts pass if partial peeling occurred in previous pass
      if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
        TracePhase tp("idealLoop", &timers[_t_idealLoop]);
!       PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
        loop_opts_cnt--;
        if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
        if (failing()) return;
      }
      // Loop opts pass for loop-unrolling before CCP
      if(major_progress() && (loop_opts_cnt > 0)) {
        TracePhase tp("idealLoop", &timers[_t_idealLoop]);
!       PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
        loop_opts_cnt--;
        if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
      }
      if (!failing()) {
        // Verify that last round of loop opts produced a valid graph

*** 2330,2349 ****
    if (failing()) return;
  
    // Loop transforms on the ideal graph. Range Check Elimination,
    // peeling, unrolling, etc.
!   if(loop_opts_cnt > 0) {
!     debug_only( int cnt = 0; );
!     while(major_progress() && (loop_opts_cnt > 0)) {
!       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
!       assert( cnt++ < 40, "infinite cycle in loop optimization" );
!       PhaseIdealLoop ideal_loop( igvn, true);
!       loop_opts_cnt--;
!       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
!       if (failing()) return;
!     }
    }
  
  #if INCLUDE_ZGC
    if (UseZGC) {
      ZBarrierSetC2::find_dominating_barriers(igvn);
--- 2362,2373 ----
    if (failing()) return;
  
    // Loop transforms on the ideal graph. Range Check Elimination,
    // peeling, unrolling, etc.
!   if (!optimize_loops(loop_opts_cnt, igvn, LoopOptsDefault)) {
!     return;
    }
  
  #if INCLUDE_ZGC
    if (UseZGC) {
      ZBarrierSetC2::find_dominating_barriers(igvn);

*** 2381,2390 ****
--- 2405,2423 ----
        assert(failing(), "must bail out w/ explicit message");
        return;
      }
    }
  
+   print_method(PHASE_BEFORE_BARRIER_EXPAND, 2);
+ 
+ #if INCLUDE_SHENANDOAHGC
+   if (UseShenandoahGC && !ShenandoahWriteBarrierNode::expand(this, igvn, loop_opts_cnt)) {
+     assert(failing(), "must bail out w/ explicit message");
+     return;
+   }
+ #endif
+ 
    if (opaque4_count() > 0) {
      C->remove_opaque4_nodes(igvn);
      igvn.optimize();
    }

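PHASE_BEFORE_BARRIER_EXPAND is a new print_method() phase id; it is presumably added to the CompilerPhaseType enum and its name table in opto/phasetype.hpp by another file of this change, roughly:

    PHASE_BEFORE_BARRIER_EXPAND,   // assumed new enum value, with a matching entry in the phase-name table

Passing loop_opts_cnt into ShenandoahWriteBarrierNode::expand presumably lets the barrier expansion run its own PhaseIdealLoop rounds against the same remaining loop-optimization budget.
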
*** 2810,2819 ****
--- 2843,2863 ----
    case Op_CallRuntime:
    case Op_CallLeaf:
    case Op_CallLeafNoFP: {
      assert (n->is_Call(), "");
      CallNode *call = n->as_Call();
+ #if INCLUDE_SHENANDOAHGC
+     if (UseShenandoahGC && ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
+       uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
+       if (call->req() > cnt) {
+         assert(call->req() == cnt+1, "only one extra input");
+         Node* addp = call->in(cnt);
+         assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
+         call->del_req(cnt);
+       }
+     }
+ #endif
      // Count call sites where the FP mode bit would have to be flipped.
      // Do not count uncommon runtime calls:
      // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
      // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
      if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {

*** 3012,3022 ****
        wq.push(n);
        for (uint next = 0; next < wq.size(); ++next) {
          Node *m = wq.at(next);
          for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
            Node* use = m->fast_out(i);
!           if (use->is_Mem() || use->is_EncodeNarrowPtr()) {
              use->ensure_control_or_add_prec(n->in(0));
            } else {
              switch(use->Opcode()) {
              case Op_AddP:
              case Op_DecodeN:
--- 3056,3066 ----
        wq.push(n);
        for (uint next = 0; next < wq.size(); ++next) {
          Node *m = wq.at(next);
          for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
            Node* use = m->fast_out(i);
!           if (use->is_Mem() || use->is_EncodeNarrowPtr() || use->is_ShenandoahBarrier()) {
              use->ensure_control_or_add_prec(n->in(0));
            } else {
              switch(use->Opcode()) {
              case Op_AddP:
              case Op_DecodeN:

*** 3348,3357 ****
--- 3392,3408 ----
      // confuses register allocation.
      if (n->req() > MemBarNode::Precedent) {
        n->set_req(MemBarNode::Precedent, top());
      }
      break;
+ #if INCLUDE_SHENANDOAHGC
+   case Op_ShenandoahReadBarrier:
+     break;
+   case Op_ShenandoahWriteBarrier:
+     assert(false, "should have been expanded already");
+     break;
+ #endif
    case Op_RangeCheck: {
      RangeCheckNode* rc = n->as_RangeCheck();
      Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
      n->subsume_by(iff, this);
      frc._tests.push(iff);

*** 3785,3798 ****
  
  // Verify GC barriers consistency
  // Currently supported:
  // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
  void Compile::verify_barriers() {
! #if INCLUDE_G1GC
!   if (UseG1GC) {
      // Verify G1 pre-barriers
      const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  
      ResourceArea *area = Thread::current()->resource_area();
      Unique_Node_List visited(area);
      Node_List worklist(area);
      // We're going to walk control flow backwards starting from the Root
--- 3836,3857 ----
  
  // Verify GC barriers consistency
  // Currently supported:
  // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
  void Compile::verify_barriers() {
! #if INCLUDE_G1GC || INCLUDE_SHENANDOAHGC
!   if (UseG1GC || UseShenandoahGC) {
      // Verify G1 pre-barriers
+ 
+ #if INCLUDE_G1GC && INCLUDE_SHENANDOAHGC
+     const int marking_offset = in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_active_offset()
+                                                 : ShenandoahThreadLocalData::satb_mark_queue_active_offset());
+ #elif INCLUDE_G1GC
      const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
+ #else
+     const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
+ #endif
  
      ResourceArea *area = Thread::current()->resource_area();
      Unique_Node_List visited(area);
      Node_List worklist(area);
      // We're going to walk control flow backwards starting from the Root

*** 4544,4554 ****
          TypeNode* tn = n->as_Type();
          const Type* t = tn->type();
          const Type* t_no_spec = t->remove_speculative();
          if (t_no_spec != t) {
            bool in_hash = igvn.hash_delete(n);
!           assert(in_hash, "node should be in igvn hash table");
            tn->set_type(t_no_spec);
            igvn.hash_insert(n);
            igvn._worklist.push(n); // give it a chance to go away
            modified++;
          }
--- 4603,4613 ----
          TypeNode* tn = n->as_Type();
          const Type* t = tn->type();
          const Type* t_no_spec = t->remove_speculative();
          if (t_no_spec != t) {
            bool in_hash = igvn.hash_delete(n);
!           assert(in_hash || (UseShenandoahGC && n->hash() == Node::NO_HASH), "node should be in igvn hash table");
            tn->set_type(t_no_spec);
            igvn.hash_insert(n);
            igvn._worklist.push(n); // give it a chance to go away
            modified++;
          }
