13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "interpreter/linkResolver.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/method.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/c2compiler.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/idealGraphPrinter.hpp"
34 #include "opto/locknode.hpp"
35 #include "opto/memnode.hpp"
36 #include "opto/opaquenode.hpp"
37 #include "opto/parse.hpp"
38 #include "opto/rootnode.hpp"
39 #include "opto/runtime.hpp"
40 #include "opto/type.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "utilities/bitMap.inline.hpp"
45 #include "utilities/copy.hpp"
46
47 // Static array so we can figure out which bytecodes most often stop us from
48 // compiling. Some of the non-static variables are needed in bytecodeInfo.cpp
49 // and eventually should be encapsulated in a proper class (gri 8/18/98).
50
51 #ifndef PRODUCT
52 int nodes_created = 0;
53 int methods_parsed = 0;
85 }
86 if (all_null_checks_found) {
87 tty->print_cr("%d made implicit (%2d%%)", implicit_null_checks,
88 (100*implicit_null_checks)/all_null_checks_found);
89 }
90 if (SharedRuntime::_implicit_null_throws) {
91 tty->print_cr("%d implicit null exceptions at runtime",
92 SharedRuntime::_implicit_null_throws);
93 }
94
95 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
96 BytecodeParseHistogram::print();
97 }
98 }
99 #endif
100
101 //------------------------------ON STACK REPLACEMENT---------------------------
102
103 // Construct a node which can be used to get incoming state for
104 // on stack replacement.
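// Note: the interpreter lays the locals out in the OSR buffer in reverse order,
// so slot 'index' is read at offset -index*wordSize from 'local_addrs' below.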
105 Node *Parse::fetch_interpreter_state(int index,
106 BasicType bt,
107 Node *local_addrs,
108 Node *local_addrs_base) {
109 Node *mem = memory(Compile::AliasIdxRaw);
110 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
111 Node *ctl = control();
112
113 // Very similar to LoadNode::make, except we handle un-aligned longs and
114 // doubles on Sparc. Intel can handle them just fine directly.
115 Node *l = NULL;
116 switch (bt) { // Signature is flattened
117 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
118 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
119 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
120 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
121 case T_LONG:
122 case T_DOUBLE: {
123 // Since arguments are in reverse order, the argument address 'adr'
124 // refers to the back half of the long/double. Recompute adr.
125 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
126 if (Matcher::misaligned_doubles_ok) {
127 l = (bt == T_DOUBLE)
128 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
129 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
130 } else {
131 l = (bt == T_DOUBLE)
132 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
133 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
134 }
135 break;
136 }
137 default: ShouldNotReachHere();
138 }
139 return _gvn.transform(l);
140 }
141
142 // Helper routine to prevent the interpreter from handing
143 // unexpected typestate to an OSR method.
144 // The Node l is a value newly dug out of the interpreter frame.
145 // The type is the type predicted by ciTypeFlow. Note that it is
146 // not a general type, but can only come from Type::get_typeflow_type.
147 // The safepoint is a map which will feed an uncommon trap.
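// A failed check below does not deoptimize on the spot; it only routes control
// into 'bad_type_exit', whose accumulated control region is later turned into
// an uncommon trap back to the interpreter by the caller.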
148 Node* Parse::check_interpreter_type(Node* l, const Type* type,
149 SafePointNode* &bad_type_exit) {
150
151 const TypeOopPtr* tp = type->isa_oopptr();
152
153 // TypeFlow may assert null-ness if a type appears unloaded.
154 if (type == TypePtr::NULL_PTR ||
155 (tp != NULL && !tp->is_loaded())) {
156 // Value must be null, not a real oop.
157 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
158 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
159 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
160 set_control(_gvn.transform( new IfTrueNode(iff) ));
161 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
162 bad_type_exit->control()->add_req(bad_type);
163 l = null();
164 }
165
166 // Typeflow can also cut off paths from the CFG, based on
167 // types which appear unloaded, or call sites which appear unlinked.
168 // When paths are cut off, values at later merge points can rise
169 // toward more specific classes. Make sure these specific classes
170 // are still in effect.
171 if (tp != NULL && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
172 // TypeFlow asserted a specific object type. Value must have that type.
173 Node* bad_type_ctrl = NULL;
174 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
175 bad_type_exit->control()->add_req(bad_type_ctrl);
176 }
177
178 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
179 return l;
180 }
181
182 // Helper routine which sets up elements of the initial parser map when
183 // performing a parse for on stack replacement. Add values into map.
184 // The only parameter contains the address of the interpreter state (the OSR buffer).
185 void Parse::load_interpreter_state(Node* osr_buf) {
186 int index;
187 int max_locals = jvms()->loc_size();
188 int max_stack = jvms()->stk_size();
189
191 // Mismatch between method and jvms can occur since map briefly held
192 // an OSR entry state (which takes up one RawPtr word).
193 assert(max_locals == method()->max_locals(), "sanity");
194 assert(max_stack >= method()->max_stack(), "sanity");
195 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
196 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
197
198 // Find the start block.
199 Block* osr_block = start_block();
200 assert(osr_block->start() == osr_bci(), "sanity");
201
202 // Set initial BCI.
203 set_parse_bci(osr_block->start());
204
205 // Set initial stack depth.
206 set_sp(osr_block->start_sp());
207
208 // Check bailouts. We currently do not perform on stack replacement
209 // of loops in catch blocks or loops which branch with a non-empty stack.
210 if (sp() != 0) {
211 C->record_method_not_compilable("OSR starts with non-empty stack");
212 return;
213 }
214 // Do not OSR inside finally clauses:
215 if (osr_block->has_trap_at(osr_block->start())) {
216 C->record_method_not_compilable("OSR starts with an immediate trap");
217 return;
218 }
219
220 // Commute monitors from interpreter frame to compiler frame.
221 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
222 int mcnt = osr_block->flow()->monitor_count();
223 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
224 for (index = 0; index < mcnt; index++) {
225 // Make a BoxLockNode for the monitor.
226 Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
227
229 // Displaced headers and locked objects are interleaved in the
230 // temp OSR buffer. We only copy the locked objects out here.
231 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
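// (Each monitor occupies two buffer slots: slot index*2 holds the locked
// object and slot index*2+1 holds the displaced header, as fetched below.)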
232 Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
233 // Try to copy the displaced header to the BoxNode
234 Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
235
237 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
238
239 // Build a bogus FastLockNode (no code will be generated) and push the
240 // monitor into our debug info.
241 const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
242 map()->push_monitor(flock);
243
244 // If the lock is our method synchronization lock, tuck it away in
245 // _sync_lock for return and rethrow exit paths.
246 if (index == 0 && method()->is_synchronized()) {
247 _synch_lock = flock;
248 }
249 }
250
251 // Use the raw liveness computation to make sure that unexpected
252 // values don't propagate into the OSR frame.
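// When a mismatch is detected (see the elided checks below), the offending
// local is forced to null and an OSR_mismatch note is emitted to the log.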
253 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
254 if (!live_locals.is_valid()) {
255 // Degenerate or breakpointed method.
282 if (C->log() != NULL) {
283 C->log()->elem("OSR_mismatch local_index='%d'",index);
284 }
285 set_local(index, null());
286 // and ignore it for the loads
287 continue;
288 }
289 }
290
291 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
292 if (type == Type::TOP || type == Type::HALF) {
293 continue;
294 }
295 // If the type falls to bottom, then this must be a local that
296 // is mixing ints and oops or some such. Forcing it to top
297 // makes it go dead.
298 if (type == Type::BOTTOM) {
299 continue;
300 }
301 // Construct code to access the appropriate local.
302 BasicType bt = type->basic_type();
303 if (type == TypePtr::NULL_PTR) {
304 // Ptr types are mixed together with T_ADDRESS but NULL is
305 // really for T_OBJECT types so correct it.
306 bt = T_OBJECT;
307 }
308 Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
309 set_local(index, value);
310 }
311
312 // Extract the needed stack entries from the interpreter frame.
313 for (index = 0; index < sp(); index++) {
314 const Type *type = osr_block->stack_type_at(index);
315 if (type != Type::TOP) {
316 // Currently the compiler bails out when attempting to on stack replace
317 // at a bci with a non-empty stack. We should not reach here.
318 ShouldNotReachHere();
319 }
320 }
321
322 // End the OSR migration
323 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
324 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
325 "OSR_migration_end", TypeRawPtr::BOTTOM,
326 osr_buf);
327
328 // Now that the interpreter state is loaded, make sure it will match
578 do_method_entry();
579 }
580
581 if (depth() == 1 && !failing()) {
582 if (C->clinit_barrier_on_entry()) {
583 // Add check to deoptimize the nmethod once the holder class is fully initialized
584 clinit_deopt();
585 }
586
587 // Add check to deoptimize the nmethod if RTM state was changed
588 rtm_deopt();
589 }
590
591 // Check for bailouts during method entry or RTM state check setup.
592 if (failing()) {
593 if (log) log->done("parse");
594 C->set_default_node_notes(caller_nn);
595 return;
596 }
597
598 entry_map = map(); // capture any changes performed by method setup code
599 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
600
601 // We begin parsing as if we have just encountered a jump to the
602 // method entry.
603 Block* entry_block = start_block();
604 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
605 set_map_clone(entry_map);
606 merge_common(entry_block, entry_block->next_path_num());
607
608 #ifndef PRODUCT
609 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
610 set_parse_histogram( parse_histogram_obj );
611 #endif
612
613 // Parse all the basic blocks.
614 do_all_blocks();
615
616 C->set_default_node_notes(caller_nn);
617
760 void Parse::build_exits() {
761 // make a clone of caller to prevent sharing of side-effects
762 _exits.set_map(_exits.clone_map());
763 _exits.clean_stack(_exits.sp());
764 _exits.sync_jvms();
765
766 RegionNode* region = new RegionNode(1);
767 record_for_igvn(region);
768 gvn().set_type_bottom(region);
769 _exits.set_control(region);
770
771 // Note: iophi and memphi are not transformed until do_exits.
772 Node* iophi = new PhiNode(region, Type::ABIO);
773 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
774 gvn().set_type_bottom(iophi);
775 gvn().set_type_bottom(memphi);
776 _exits.set_i_o(iophi);
777 _exits.set_all_memory(memphi);
778
779 // Add a return value to the exit state. (Do not push it yet.)
780 if (tf()->range()->cnt() > TypeFunc::Parms) {
781 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
782 if (ret_type->isa_int()) {
783 BasicType ret_bt = method()->return_type()->basic_type();
784 if (ret_bt == T_BOOLEAN ||
785 ret_bt == T_CHAR ||
786 ret_bt == T_BYTE ||
787 ret_bt == T_SHORT) {
788 ret_type = TypeInt::INT;
789 }
790 }
791
792 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
793 // becomes loaded during the subsequent parsing, the loaded and unloaded
794 // types will not join when we transform and push in do_exits().
795 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
796 if (ret_oop_type && !ret_oop_type->is_loaded()) {
797 ret_type = TypeOopPtr::BOTTOM;
798 }
799 int ret_size = type2size[ret_type->basic_type()];
800 Node* ret_phi = new PhiNode(region, ret_type);
801 gvn().set_type_bottom(ret_phi);
802 _exits.ensure_stack(ret_size);
803 assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
804 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
805 _exits.set_argument(0, ret_phi); // here is where the parser finds it
806 // Note: ret_phi is not yet pushed, until do_exits.
807 }
808 }
809
810
811 //----------------------------build_start_state-------------------------------
812 // Construct a state which contains only the incoming arguments from an
813 // unknown caller. The method & bci will be NULL & InvocationEntryBci.
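// The resulting map follows the usual SafePointNode layout: slots
// 0..TypeFunc::Parms-1 carry control, i/o, memory, frame pointer and return
// address, and the incoming arguments start at TypeFunc::Parms (see the
// argoff assert below).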
814 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
815 int arg_size = tf->domain()->cnt();
816 int max_size = MAX2(arg_size, (int)tf->range()->cnt());
817 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
818 SafePointNode* map = new SafePointNode(max_size, jvms);
819 record_for_igvn(map);
820 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
821 Node_Notes* old_nn = default_node_notes();
822 if (old_nn != NULL && has_method()) {
823 Node_Notes* entry_nn = old_nn->clone(this);
824 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
825 entry_jvms->set_offsets(0);
826 entry_jvms->set_bci(entry_bci());
827 entry_nn->set_jvms(entry_jvms);
828 set_default_node_notes(entry_nn);
829 }
830 uint i;
831 for (i = 0; i < (uint)arg_size; i++) {
832 Node* parm = initial_gvn()->transform(new ParmNode(start, i));
833 map->init_req(i, parm);
834 // Record all these guys for later GVN.
835 record_for_igvn(parm);
836 }
837 for (; i < map->req(); i++) {
838 map->init_req(i, top());
839 }
840 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
841 set_default_node_notes(old_nn);
842 jvms->set_map(map);
843 return jvms;
844 }
845
846 //-----------------------------make_node_notes---------------------------------
847 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
848 if (caller_nn == NULL) return NULL;
849 Node_Notes* nn = caller_nn->clone(C);
850 JVMState* caller_jvms = nn->jvms();
851 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
852 jvms->set_offsets(0);
853 jvms->set_bci(_entry_bci);
854 nn->set_jvms(jvms);
855 return nn;
856 }
857
858
859 //--------------------------return_values--------------------------------------
860 void Compile::return_values(JVMState* jvms) {
861 GraphKit kit(jvms);
862 Node* ret = new ReturnNode(TypeFunc::Parms,
863 kit.control(),
864 kit.i_o(),
865 kit.reset_memory(),
866 kit.frameptr(),
867 kit.returnadr());
868 // Add zero or one return value
869 int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
870 if (ret_size > 0) {
871 kit.inc_sp(-ret_size); // pop the return value(s)
872 kit.sync_jvms();
873 ret->add_req(kit.argument(0));
874 // Note: The second dummy edge is not needed by a ReturnNode.
875 }
876 // bind it to root
877 root()->add_req(ret);
878 record_for_igvn(ret);
879 initial_gvn()->transform_no_reclaim(ret);
880 }
881
882 //------------------------rethrow_exceptions-----------------------------------
883 // Bind all exception states in the list into a single RethrowNode.
884 void Compile::rethrow_exceptions(JVMState* jvms) {
885 GraphKit kit(jvms);
886 if (!kit.has_exceptions()) return; // nothing to generate
887 // Load my combined exception state into the kit, with all phis transformed:
888 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
889 Node* ex_oop = kit.use_exception_state(ex_map);
890 RethrowNode* exit = new RethrowNode(kit.control(),
891 kit.i_o(), kit.reset_memory(),
892 kit.frameptr(), kit.returnadr(),
893 // like a return but with exception input
894 ex_oop);
978 // to complete, we force all writes to complete.
979 //
980 // 2. Experimental VM option is used to force the barrier if any field
981 // was written out in the constructor.
982 //
983 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
984 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
985 // MemBarVolatile is used before volatile load instead of after volatile
986 // store, so there's no barrier after the store.
987 // We want to guarantee the same behavior as on platforms with total store
988 // order, although this is not required by the Java memory model.
989 // In this case, we want to enforce visibility of volatile field
990 // initializations which are performed in constructors.
991 // So as with finals, we add a barrier here.
992 //
993 // "All bets are off" unless the first publication occurs after a
994 // normal return from the constructor. We do not attempt to detect
995 // such unusual early publications. But no barrier is needed on
996 // exceptional returns, since they cannot publish normally.
997 //
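// Informal example of the race being guarded against: a constructor that
// writes a final field and is published with a plain store ("shared = new T()")
// must not let another thread observe the field's default value through that
// racy publication; the release barrier at the constructor's normal exit
// prevents the reordering.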
998 if (method()->is_initializer() &&
999 (wrote_final() ||
1000 (AlwaysSafeConstructors && wrote_fields()) ||
1001 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1002 _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1003
1004 // If a memory barrier is created for a final field write
1005 // and the allocation node does not escape the initializer method,
1006 // then the barrier introduced by the allocation node can be removed.
1007 if (DoEscapeAnalysis && alloc_with_final()) {
1008 AllocateNode *alloc = AllocateNode::Ideal_allocation(alloc_with_final(), &_gvn);
1009 alloc->compute_MemBar_redundancy(method());
1010 }
1011 if (PrintOpto && (Verbose || WizardMode)) {
1012 method()->print_name();
1013 tty->print_cr(" writes finals and needs a memory barrier");
1014 }
1015 }
1016
1017 // Any method can write a @Stable field; insert memory barriers
1018 // after those also. Can't bind predecessor allocation node (if any)
1019 // with barrier because allocation doesn't always dominate
1020 // MemBarRelease.
1021 if (wrote_stable()) {
1022 _exits.insert_mem_bar(Op_MemBarRelease);
1023 if (PrintOpto && (Verbose || WizardMode)) {
1024 method()->print_name();
1025 tty->print_cr(" writes @Stable and needs a memory barrier");
1026 }
1027 }
1028
1029 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1030 // transform each slice of the original memphi:
1031 mms.set_memory(_gvn.transform(mms.memory()));
1032 }
1033 // Clean up input MergeMems created by transforming the slices
1034 _gvn.transform(_exits.merged_memory());
1035
1036 if (tf()->range()->cnt() > TypeFunc::Parms) {
1037 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1038 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1039 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1040 // If the type we set for the ret_phi in build_exits() is too optimistic and
1041 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1042 // loading. It could also be due to an error, so mark this method as not compilable because
1043 // otherwise this could lead to an infinite compile loop.
1044 // In any case, this code path is rarely (and never in my testing) reached.
1045 C->record_method_not_compilable("Can't determine return type.");
1046 return;
1047 }
1048 if (ret_type->isa_int()) {
1049 BasicType ret_bt = method()->return_type()->basic_type();
1050 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1051 }
1052 _exits.push_node(ret_type->basic_type(), ret_phi);
1053 }
1054
1055 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1056
1057 // Unlock along the exceptional paths.
1110 }
1111
1112 //-----------------------------create_entry_map-------------------------------
1113 // Initialize our parser map to contain the types at method entry.
1114 // For OSR, the map contains a single RawPtr parameter.
1115 // Initial monitor locking for sync. methods is performed by do_method_entry.
1116 SafePointNode* Parse::create_entry_map() {
1117 // Check for really stupid bail-out cases.
1118 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1119 if (len >= 32760) {
1120 C->record_method_not_compilable("too many local variables");
1121 return NULL;
1122 }
1123
1124 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1125 _caller->map()->delete_replaced_nodes();
1126
1127 // If this is an inlined method, we may have to do a receiver null check.
1128 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1129 GraphKit kit(_caller);
1130 kit.null_check_receiver_before_call(method());
1131 _caller = kit.transfer_exceptions_into_jvms();
1132 if (kit.stopped()) {
1133 _exits.add_exception_states_from(_caller);
1134 _exits.set_jvms(_caller);
1135 return NULL;
1136 }
1137 }
1138
1139 assert(method() != NULL, "parser must have a method");
1140
1141 // Create an initial safepoint to hold JVM state during parsing
1142 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
1143 set_map(new SafePointNode(len, jvms));
1144 jvms->set_map(map());
1145 record_for_igvn(map());
1146 assert(jvms->endoff() == len, "correct jvms sizing");
1147
1148 SafePointNode* inmap = _caller->map();
1149 assert(inmap != NULL, "must have inmap");
1150 // In case of null check on receiver above
1151 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1152
1153 uint i;
1154
1155 // Pass thru the predefined input parameters.
1156 for (i = 0; i < TypeFunc::Parms; i++) {
1157 map()->init_req(i, inmap->in(i));
1158 }
1159
1160 if (depth() == 1) {
1161 assert(map()->memory()->Opcode() == Op_Parm, "");
1162 // Insert the memory aliasing node
1163 set_all_memory(reset_memory());
1164 }
1165 assert(merged_memory(), "");
1166
1167 // Now add the locals which are initially bound to arguments:
1168 uint arg_size = tf()->domain()->cnt();
1169 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1170 for (i = TypeFunc::Parms; i < arg_size; i++) {
1171 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1172 }
1173
1174 // Clear out the rest of the map (locals and stack)
1175 for (i = arg_size; i < len; i++) {
1176 map()->init_req(i, top());
1177 }
1178
1179 SafePointNode* entry_map = stop();
1180 return entry_map;
1181 }
1182
1183 //-----------------------------do_method_entry--------------------------------
1184 // Emit any code needed in the pseudo-block before BCI zero.
1185 // The main thing to do is lock the receiver of a synchronized method.
1186 void Parse::do_method_entry() {
1187 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1188 set_sp(0); // Java Stack Pointer
1222
1223 // If the method is synchronized, we need to construct a lock node, attach
1224 // it to the Start node, and pin it there.
1225 if (method()->is_synchronized()) {
1226 // Insert a FastLockNode right after the Start which takes as arguments
1227 // the current thread pointer, the "this" pointer & the address of the
1228 // stack slot pair used for the lock. The "this" pointer is a projection
1229 // off the start node, but the locking spot has to be constructed by
1230 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1231 // becomes the second argument to the FastLockNode call. The
1232 // FastLockNode becomes the new control parent to pin it to the start.
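// (Conceptually this mirrors the bytecode-level behavior: a synchronized
// instance method locks 'this' on entry, while a synchronized static method
// locks the holder class's java.lang.Class mirror, as set up below.)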
1233
1234 // Setup Object Pointer
1235 Node *lock_obj = NULL;
1236 if (method()->is_static()) {
1237 ciInstance* mirror = _method->holder()->java_mirror();
1238 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1239 lock_obj = makecon(t_lock);
1240 } else { // Else pass the "this" pointer,
1241 lock_obj = local(0); // which is Parm0 from StartNode
1242 }
1243 // Clear out dead values from the debug info.
1244 kill_dead_locals();
1245 // Build the FastLockNode
1246 _synch_lock = shared_lock(lock_obj);
1247 }
1248
1249 // Feed profiling data for parameters to the type system so it can
1250 // propagate it as speculative types
1251 record_profiled_parameters_for_speculation();
1252 }
1253
1254 //------------------------------init_blocks------------------------------------
1255 // Create the parser's basic blocks from the ciTypeFlow result and initialize them.
1256 void Parse::init_blocks() {
1257 // Create the blocks.
1258 _block_count = flow()->block_count();
1259 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1260
1261 // Initialize the structs.
1633 //--------------------handle_missing_successor---------------------------------
1634 void Parse::handle_missing_successor(int target_bci) {
1635 #ifndef PRODUCT
1636 Block* b = block();
1637 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1638 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1639 #endif
1640 ShouldNotReachHere();
1641 }
1642
1643 //--------------------------merge_common---------------------------------------
1644 void Parse::merge_common(Parse::Block* target, int pnum) {
1645 if (TraceOptoParse) {
1646 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1647 }
1648
1649 // Zap extra stack slots to top
1650 assert(sp() == target->start_sp(), "");
1651 clean_stack(sp());
1652
1653 if (!target->is_merged()) { // No prior mapping at this bci
1654 if (TraceOptoParse) { tty->print(" with empty state"); }
1655
1656 // If this path is dead, do not bother capturing it as a merge.
1657 // It is "as if" we had 1 fewer predecessors from the beginning.
1658 if (stopped()) {
1659 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1660 return;
1661 }
1662
1663 // Make a region if we know there are multiple or unpredictable inputs.
1664 // (Also, if this is a plain fall-through, we might see another region,
1665 // which must not be allowed into this block's map.)
1666 if (pnum > PhiNode::Input // Known multiple inputs.
1667 || target->is_handler() // These have unpredictable inputs.
1668 || target->is_loop_head() // Known multiple inputs
1669 || control()->is_Region()) { // We must hide this guy.
1670
1671 int current_bci = bci();
1672 set_parse_bci(target->start()); // Set target bci
1686 gvn().set_type(r, Type::CONTROL);
1687 record_for_igvn(r);
1688 // zap all inputs to NULL for debugging (done in Node(uint) constructor)
1689 // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
1690 r->init_req(pnum, control());
1691 set_control(r);
1692 set_parse_bci(current_bci); // Restore bci
1693 }
1694
1695 // Convert the existing Parser mapping into a mapping at this bci.
1696 store_state_to(target);
1697 assert(target->is_merged(), "do not come here twice");
1698
1699 } else { // Prior mapping at this bci
1700 if (TraceOptoParse) { tty->print(" with previous state"); }
1701 #ifdef ASSERT
1702 if (target->is_SEL_head()) {
1703 target->mark_merged_backedge(block());
1704 }
1705 #endif
1706 // We must not manufacture more phis if the target is already parsed.
1707 bool nophi = target->is_parsed();
1708
1709 SafePointNode* newin = map();// Hang on to incoming mapping
1710 Block* save_block = block(); // Hang on to incoming block;
1711 load_state_from(target); // Get prior mapping
1712
1713 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1714 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1715 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1716 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1717
1718 // Iterate over my current mapping and the old mapping.
1719 // Where different, insert Phi functions.
1720 // Use any existing Phi functions.
1721 assert(control()->is_Region(), "must be merging to a region");
1722 RegionNode* r = control()->as_Region();
1723
1724 // Compute where to merge into
1725 // Merge incoming control path
1726 r->init_req(pnum, newin->control());
1727
1728 if (pnum == 1) { // Last merge for this Region?
1729 if (!block()->flow()->is_irreducible_entry()) {
1730 Node* result = _gvn.transform_no_reclaim(r);
1731 if (r != result && TraceOptoParse) {
1732 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1733 }
1734 }
1735 record_for_igvn(r);
1736 }
1737
1738 // Update all the non-control inputs to map:
1739 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1740 bool check_elide_phi = target->is_SEL_backedge(save_block);
1741 for (uint j = 1; j < newin->req(); j++) {
1742 Node* m = map()->in(j); // Current state of target.
1743 Node* n = newin->in(j); // Incoming change to target state.
1744 PhiNode* phi;
1745 if (m->is_Phi() && m->as_Phi()->region() == r)
1746 phi = m->as_Phi();
1747 else
1748 phi = NULL;
1749 if (m != n) { // Different; must merge
1750 switch (j) {
1751 // Frame pointer and Return Address never change
1752 case TypeFunc::FramePtr:// Drop m, use the original value
1753 case TypeFunc::ReturnAdr:
1754 break;
1755 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1756 assert(phi == NULL, "the merge contains phis, not vice versa");
1757 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1758 continue;
1759 default: // All normal stuff
1760 if (phi == NULL) {
1761 const JVMState* jvms = map()->jvms();
1762 if (EliminateNestedLocks &&
1763 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1764 // BoxLock nodes are never commoned.
1765 // Use old BoxLock node as merged box.
1766 assert(newin->jvms()->is_monitor_box(j), "sanity");
1767 // This assert also tests that nodes are BoxLock.
1768 assert(BoxLockNode::same_slot(n, m), "sanity");
1769 C->gvn_replace_by(n, m);
1770 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1771 phi = ensure_phi(j, nophi);
1772 }
1773 }
1774 break;
1775 }
1776 }
1777 // At this point, n might be top if:
1778 // - there is no phi (because TypeFlow detected a conflict), or
1779 // - the corresponding control edge is top (a dead incoming path)
1780 // It is a bug if we create a phi which sees a garbage value on a live path.
1781
1782 if (phi != NULL) {
1783 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1784 assert(phi->region() == r, "");
1785 phi->set_req(pnum, n); // Then add 'n' to the merge
1786 if (pnum == PhiNode::Input) {
1787 // Last merge for this Phi.
1788 // So far, Phis have had a reasonable type from ciTypeFlow.
1789 // Now _gvn will join that with the meet of current inputs.
1790 // BOTTOM is never permissible here, because pessimistically
1791 // Phis of pointers cannot lose the basic pointer type.
1792 debug_only(const Type* bt1 = phi->bottom_type());
1793 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1794 map()->set_req(j, _gvn.transform_no_reclaim(phi));
1795 debug_only(const Type* bt2 = phi->bottom_type());
1796 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1797 record_for_igvn(phi);
1798 }
1799 }
1800 } // End of for all values to be merged
1801
1802 if (pnum == PhiNode::Input &&
1803 !r->in(0)) { // The occasional useless Region
1804 assert(control() == r, "");
1805 set_control(r->nonnull_req());
1806 }
1807
1808 map()->merge_replaced_nodes_with(newin);
1809
1810 // newin has been subsumed into the lazy merge, and is now dead.
1811 set_block(save_block);
1812
1813 stop(); // done with this guy, for now
1814 }
1815
1816 if (TraceOptoParse) {
1817 tty->print_cr(" on path %d", pnum);
1818 }
1819
1820 // Done with this parser state.
1821 assert(stopped(), "");
1822 }
1823
1935
1936 // Add new path to the region.
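// (Every Phi already hanging off this region must grow by one input in
// lock-step with the region itself; the loop below does that for both the
// memory phis and the regular value phis.)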
1937 uint pnum = r->req();
1938 r->add_req(NULL);
1939
1940 for (uint i = 1; i < map->req(); i++) {
1941 Node* n = map->in(i);
1942 if (i == TypeFunc::Memory) {
1943 // Ensure a phi on all currently known memories.
1944 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1945 Node* phi = mms.memory();
1946 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
1947 assert(phi->req() == pnum, "must be same size as region");
1948 phi->add_req(NULL);
1949 }
1950 }
1951 } else {
1952 if (n->is_Phi() && n->as_Phi()->region() == r) {
1953 assert(n->req() == pnum, "must be same size as region");
1954 n->add_req(NULL);
1955 }
1956 }
1957 }
1958
1959 return pnum;
1960 }
1961
1962 //------------------------------ensure_phi-------------------------------------
1963 // Turn the idx'th entry of the current map into a Phi
1964 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
1965 SafePointNode* map = this->map();
1966 Node* region = map->control();
1967 assert(region->is_Region(), "");
1968
1969 Node* o = map->in(idx);
1970 assert(o != NULL, "");
1971
1972 if (o == top()) return NULL; // TOP always merges into TOP
1973
1974 if (o->is_Phi() && o->as_Phi()->region() == region) {
1975 return o->as_Phi();
1976 }
1977
1978 // Now use a Phi here for merging
1979 assert(!nocreate, "Cannot build a phi for a block already parsed.");
1980 const JVMState* jvms = map->jvms();
1981 const Type* t = NULL;
1982 if (jvms->is_loc(idx)) {
1983 t = block()->local_type_at(idx - jvms->locoff());
1984 } else if (jvms->is_stk(idx)) {
1985 t = block()->stack_type_at(idx - jvms->stkoff());
1986 } else if (jvms->is_mon(idx)) {
1987 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
1988 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
1989 } else if ((uint)idx < TypeFunc::Parms) {
1990 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
1991 } else {
1992 assert(false, "no type information for this phi");
1993 }
1994
1995 // If the type falls to bottom, then this must be a local that
1996 // is mixing ints and oops or some such. Forcing it to top
1997 // makes it go dead.
1998 if (t == Type::BOTTOM) {
1999 map->set_req(idx, top());
2000 return NULL;
2001 }
2002
2003 // Do not create phis for top either.
2004 // A top on a non-null control flow must be unused even after the phi.
2005 if (t == Type::TOP || t == Type::HALF) {
2006 map->set_req(idx, top());
2007 return NULL;
2008 }
2009
2010 PhiNode* phi = PhiNode::make(region, o, t);
2011 gvn().set_type(phi, t);
2012 if (C->do_escape_analysis()) record_for_igvn(phi);
2013 map->set_req(idx, phi);
2014 return phi;
2015 }
2016
2017 //--------------------------ensure_memory_phi----------------------------------
2018 // Turn the idx'th slice of the current memory into a Phi
2019 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2020 MergeMemNode* mem = merged_memory();
2021 Node* region = control();
2022 assert(region->is_Region(), "");
2023
2024 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2025 assert(o != NULL && o != top(), "");
2026
2027 PhiNode* phi;
2028 if (o->is_Phi() && o->as_Phi()->region() == region) {
2029 phi = o->as_Phi();
2030 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2031 // clone the shared base memory phi to make a new memory split
2032 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2033 const Type* t = phi->bottom_type();
2034 const TypePtr* adr_type = C->get_adr_type(idx);
2162 Node* chk = _gvn.transform( new CmpINode(opq, profile_state) );
2163 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
2164 // Branch to failure if state was changed
2165 { BuildCutout unless(this, tst, PROB_ALWAYS);
2166 uncommon_trap(Deoptimization::Reason_rtm_state_change,
2167 Deoptimization::Action_make_not_entrant);
2168 }
2169 }
2170 #endif
2171 }
2172
2173 //------------------------------return_current---------------------------------
2174 // Append current _map to _exit_return
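// Each return site contributes its control, i/o, memory and (optional) return
// value as new inputs to the shared exit region and phis built in build_exits().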
2175 void Parse::return_current(Node* value) {
2176 if (RegisterFinalizersAtInit &&
2177 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2178 call_register_finalizer();
2179 }
2180
2181 // Do not set_parse_bci, so that return goo is credited to the return insn.
2182 set_bci(InvocationEntryBci);
2183 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2184 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2185 }
2186 if (C->env()->dtrace_method_probes()) {
2187 make_dtrace_method_exit(method());
2188 }
2189 SafePointNode* exit_return = _exits.map();
2190 exit_return->in( TypeFunc::Control )->add_req( control() );
2191 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2192 Node *mem = exit_return->in( TypeFunc::Memory );
2193 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2194 if (mms.is_empty()) {
2195 // get a copy of the base memory, and patch just this one input
2196 const TypePtr* adr_type = mms.adr_type(C);
2197 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2198 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2199 gvn().set_type_bottom(phi);
2200 phi->del_req(phi->req()-1); // prepare to re-patch
2201 mms.set_memory(phi);
2202 }
2203 mms.memory()->add_req(mms.memory2());
2204 }
2205
2206 // frame pointer is always same, already captured
2207 if (value != NULL) {
2208 // If returning oops to an interface-return, there is a silent free
2209 // cast from oop to interface allowed by the Verifier. Make it explicit
2210 // here.
2211 Node* phi = _exits.argument(0);
2212 phi->add_req(value);
2213 }
2214
2215 if (_first_return) {
2216 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2217 _first_return = false;
2218 } else {
2219 _exits.map()->merge_replaced_nodes_with(map());
2220 }
2221
2222 stop_and_kill_map(); // This CFG path dies here
2223 }
2224
2225
2226 //------------------------------add_safepoint----------------------------------
2227 void Parse::add_safepoint() {
2228 uint parms = TypeFunc::Parms+1;
2229
2230 // Clear out dead values from the debug info.
2231 kill_dead_locals();
2232
2233 // Clone the JVM State
2234 SafePointNode *sfpnt = new SafePointNode(parms, NULL);
|
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "interpreter/linkResolver.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/method.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/c2compiler.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/convertnode.hpp"
34 #include "opto/idealGraphPrinter.hpp"
35 #include "opto/inlinetypenode.hpp"
36 #include "opto/locknode.hpp"
37 #include "opto/memnode.hpp"
38 #include "opto/opaquenode.hpp"
39 #include "opto/parse.hpp"
40 #include "opto/rootnode.hpp"
41 #include "opto/runtime.hpp"
42 #include "opto/type.hpp"
43 #include "runtime/handles.inline.hpp"
44 #include "runtime/safepointMechanism.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/bitMap.inline.hpp"
47 #include "utilities/copy.hpp"
48
49 // Static array so we can figure out which bytecodes most often stop us from
50 // compiling. Some of the non-static variables are needed in bytecodeInfo.cpp
51 // and eventually should be encapsulated in a proper class (gri 8/18/98).
52
53 #ifndef PRODUCT
54 int nodes_created = 0;
55 int methods_parsed = 0;
87 }
88 if (all_null_checks_found) {
89 tty->print_cr("%d made implicit (%2d%%)", implicit_null_checks,
90 (100*implicit_null_checks)/all_null_checks_found);
91 }
92 if (SharedRuntime::_implicit_null_throws) {
93 tty->print_cr("%d implicit null exceptions at runtime",
94 SharedRuntime::_implicit_null_throws);
95 }
96
97 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
98 BytecodeParseHistogram::print();
99 }
100 }
101 #endif
102
103 //------------------------------ON STACK REPLACEMENT---------------------------
104
105 // Construct a node which can be used to get incoming state for
106 // on stack replacement.
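// In this variant the caller passes the ciTypeFlow type; the BasicType is
// derived from it here, with TypePtr::NULL_PTR corrected to T_OBJECT just below.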
107 Node* Parse::fetch_interpreter_state(int index,
108 const Type* type,
109 Node* local_addrs,
110 Node* local_addrs_base) {
111 BasicType bt = type->basic_type();
112 if (type == TypePtr::NULL_PTR) {
113 // Ptr types are mixed together with T_ADDRESS but NULL is
114 // really for T_OBJECT types so correct it.
115 bt = T_OBJECT;
116 }
117 Node *mem = memory(Compile::AliasIdxRaw);
118 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
119 Node *ctl = control();
120
121 // Very similar to LoadNode::make, except we handle un-aligned longs and
122 // doubles on Sparc. Intel can handle them just fine directly.
123 Node *l = NULL;
124 switch (bt) { // Signature is flattened
125 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
126 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
127 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
128 case T_PRIMITIVE_OBJECT:
129 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
130 case T_LONG:
131 case T_DOUBLE: {
132 // Since arguments are in reverse order, the argument address 'adr'
133 // refers to the back half of the long/double. Recompute adr.
134 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
135 if (Matcher::misaligned_doubles_ok) {
136 l = (bt == T_DOUBLE)
137 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
138 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
139 } else {
140 l = (bt == T_DOUBLE)
141 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
142 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
143 }
144 break;
145 }
146 default: ShouldNotReachHere();
147 }
148 return _gvn.transform(l);
149 }
150
151 // Helper routine to prevent the interpreter from handing
152 // unexpected typestate to an OSR method.
153 // The Node l is a value newly dug out of the interpreter frame.
154 // The type is the type predicted by ciTypeFlow. Note that it is
155 // not a general type, but can only come from Type::get_typeflow_type.
156 // The safepoint is a map which will feed an uncommon trap.
157 Node* Parse::check_interpreter_type(Node* l, const Type* type,
158 SafePointNode* &bad_type_exit) {
159 const TypeOopPtr* tp = type->isa_oopptr();
160
161 // TypeFlow may assert null-ness if a type appears unloaded.
162 if (type == TypePtr::NULL_PTR ||
163 (tp != NULL && !tp->is_loaded())) {
164 // Value must be null, not a real oop.
165 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
166 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
167 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
168 set_control(_gvn.transform( new IfTrueNode(iff) ));
169 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
170 bad_type_exit->control()->add_req(bad_type);
171 l = null();
172 }
173
174 // Typeflow can also cut off paths from the CFG, based on
175 // types which appear unloaded, or call sites which appear unlinked.
176 // When paths are cut off, values at later merge points can rise
177 // toward more specific classes. Make sure these specific classes
178 // are still in effect.
179 if (tp != NULL && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
180 // TypeFlow asserted a specific object type. Value must have that type.
181 Node* bad_type_ctrl = NULL;
182 if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
183 // Check inline types for null here to prevent checkcast from adding an
184 // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
185 l = null_check_oop(l, &bad_type_ctrl);
186 bad_type_exit->control()->add_req(bad_type_ctrl);
187 }
188 l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
189 bad_type_exit->control()->add_req(bad_type_ctrl);
190 }
191
192 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
193 return l;
194 }
195
196 // Helper routine which sets up elements of the initial parser map when
197 // performing a parse for on stack replacement. Add values into map.
197 // The only parameter contains the address of the interpreter state (the OSR buffer).
199 void Parse::load_interpreter_state(Node* osr_buf) {
200 int index;
201 int max_locals = jvms()->loc_size();
202 int max_stack = jvms()->stk_size();
203
204 // Mismatch between method and jvms can occur since map briefly held
205 // an OSR entry state (which takes up one RawPtr word).
206 assert(max_locals == method()->max_locals(), "sanity");
207 assert(max_stack >= method()->max_stack(), "sanity");
208 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
209 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
210
211 // Find the start block.
212 Block* osr_block = start_block();
213 assert(osr_block->start() == osr_bci(), "sanity");
214
215 // Set initial BCI.
216 set_parse_bci(osr_block->start());
217
218 // Set initial stack depth.
219 set_sp(osr_block->start_sp());
220
221 // Check bailouts. We currently do not perform on stack replacement
222 // of loops in catch blocks or loops which branch with a non-empty stack.
223 if (sp() != 0) {
224 C->record_method_not_compilable("OSR starts with non-empty stack");
225 return;
226 }
227 // Do not OSR inside finally clauses:
228 if (osr_block->has_trap_at(osr_block->start())) {
229 C->record_method_not_compilable("OSR starts with an immediate trap");
230 return;
231 }
232
233 // Commute monitors from interpreter frame to compiler frame.
234 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
235 int mcnt = osr_block->flow()->monitor_count();
236 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
237 for (index = 0; index < mcnt; index++) {
238 // Make a BoxLockNode for the monitor.
239 Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
240
241 // Displaced headers and locked objects are interleaved in the
242 // temp OSR buffer. We only copy the locked objects out here.
243 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
244 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
245 // Try to copy the displaced header to the BoxNode
246 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
247
248 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
249
250 // Build a bogus FastLockNode (no code will be generated) and push the
251 // monitor into our debug info.
252 const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
253 map()->push_monitor(flock);
254
255 // If the lock is our method synchronization lock, tuck it away in
256 // _sync_lock for return and rethrow exit paths.
257 if (index == 0 && method()->is_synchronized()) {
258 _synch_lock = flock;
259 }
260 }
261
262 // Use the raw liveness computation to make sure that unexpected
263 // values don't propagate into the OSR frame.
264 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
265 if (!live_locals.is_valid()) {
266 // Degenerate or breakpointed method.
293 if (C->log() != NULL) {
294 C->log()->elem("OSR_mismatch local_index='%d'",index);
295 }
296 set_local(index, null());
297 // and ignore it for the loads
298 continue;
299 }
300 }
301
302 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
303 if (type == Type::TOP || type == Type::HALF) {
304 continue;
305 }
306 // If the type falls to bottom, then this must be a local that
307 // is mixing ints and oops or some such. Forcing it to top
308 // makes it go dead.
309 if (type == Type::BOTTOM) {
310 continue;
311 }
312 // Construct code to access the appropriate local.
313 Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
314 set_local(index, value);
315 }
316
317 // Extract the needed stack entries from the interpreter frame.
318 for (index = 0; index < sp(); index++) {
319 const Type *type = osr_block->stack_type_at(index);
320 if (type != Type::TOP) {
321 // Currently the compiler bails out when attempting to on stack replace
322 // at a bci with a non-empty stack. We should not reach here.
323 ShouldNotReachHere();
324 }
325 }
326
327 // End the OSR migration
328 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
329 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
330 "OSR_migration_end", TypeRawPtr::BOTTOM,
331 osr_buf);
332
333 // Now that the interpreter state is loaded, make sure it will match
583 do_method_entry();
584 }
585
586 if (depth() == 1 && !failing()) {
587 if (C->clinit_barrier_on_entry()) {
588 // Add check to deoptimize the nmethod once the holder class is fully initialized
589 clinit_deopt();
590 }
591
592 // Add check to deoptimize the nmethod if RTM state was changed
593 rtm_deopt();
594 }
595
596 // Check for bailouts during method entry or RTM state check setup.
597 if (failing()) {
598 if (log) log->done("parse");
599 C->set_default_node_notes(caller_nn);
600 return;
601 }
602
603 // Handle inline type arguments
604 int arg_size = method()->arg_size();
605 for (int i = 0; i < arg_size; i++) {
606 Node* parm = local(i);
607 const Type* t = _gvn.type(parm);
608 if (t->is_inlinetypeptr()) {
609 // Create InlineTypeNode from the oop and replace the parameter
610 Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass(), !t->maybe_null());
611 set_local(i, vt);
612 } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && method()->has_vararg() &&
613 t->isa_aryptr() != NULL && !t->is_aryptr()->is_null_free() && !t->is_aryptr()->is_not_null_free()) {
614 // Speculate on varargs Object array being not null-free (and therefore also not flattened)
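// (Assumption worth noting: treating the varargs array as not null-free
// presumably lets later array accesses skip flat-array handling; a failed
// speculation falls back through the normal speculative-type trap path.)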
615 const TypePtr* spec_type = t->speculative();
616 spec_type = (spec_type != NULL && spec_type->isa_aryptr() != NULL) ? spec_type : t->is_aryptr();
617 spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free();
618 spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
619 Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
620 set_local(i, cast);
621 }
622 }
623
624 entry_map = map(); // capture any changes performed by method setup code
625 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
626
627 // We begin parsing as if we have just encountered a jump to the
628 // method entry.
629 Block* entry_block = start_block();
630 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
631 set_map_clone(entry_map);
632 merge_common(entry_block, entry_block->next_path_num());
633
634 #ifndef PRODUCT
635 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
636 set_parse_histogram( parse_histogram_obj );
637 #endif
638
639 // Parse all the basic blocks.
640 do_all_blocks();
641
642 C->set_default_node_notes(caller_nn);
643
786 void Parse::build_exits() {
787 // make a clone of caller to prevent sharing of side-effects
788 _exits.set_map(_exits.clone_map());
789 _exits.clean_stack(_exits.sp());
790 _exits.sync_jvms();
791
792 RegionNode* region = new RegionNode(1);
793 record_for_igvn(region);
794 gvn().set_type_bottom(region);
795 _exits.set_control(region);
796
797 // Note: iophi and memphi are not transformed until do_exits.
798 Node* iophi = new PhiNode(region, Type::ABIO);
799 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
800 gvn().set_type_bottom(iophi);
801 gvn().set_type_bottom(memphi);
802 _exits.set_i_o(iophi);
803 _exits.set_all_memory(memphi);
804
805 // Add a return value to the exit state. (Do not push it yet.)
806 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
807 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
808 if (ret_type->isa_int()) {
809 BasicType ret_bt = method()->return_type()->basic_type();
810 if (ret_bt == T_BOOLEAN ||
811 ret_bt == T_CHAR ||
812 ret_bt == T_BYTE ||
813 ret_bt == T_SHORT) {
814 ret_type = TypeInt::INT;
815 }
816 }
817
818 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
819 // becomes loaded during the subsequent parsing, the loaded and unloaded
820 // types will not join when we transform and push in do_exits().
821 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
822 if (ret_oop_type && !ret_oop_type->is_loaded()) {
823 ret_type = TypeOopPtr::BOTTOM;
824 }
825 int ret_size = type2size[ret_type->basic_type()];
826 Node* ret_phi = new PhiNode(region, ret_type);
827 gvn().set_type_bottom(ret_phi);
828 _exits.ensure_stack(ret_size);
829 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
830 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
831 _exits.set_argument(0, ret_phi); // here is where the parser finds it
832 // Note: ret_phi is not yet pushed, until do_exits.
833 }
834 }
835
836 //----------------------------build_start_state-------------------------------
837 // Construct a state which contains only the incoming arguments from an
838 // unknown caller. The method & bci will be NULL & InvocationEntryBci.
839 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
840 int arg_size = tf->domain_sig()->cnt();
841 int max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
842 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
843 SafePointNode* map = new SafePointNode(max_size, jvms);
844 jvms->set_map(map);
845 record_for_igvn(map);
846 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
847 Node_Notes* old_nn = default_node_notes();
848 if (old_nn != NULL && has_method()) {
849 Node_Notes* entry_nn = old_nn->clone(this);
850 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
851 entry_jvms->set_offsets(0);
852 entry_jvms->set_bci(entry_bci());
853 entry_nn->set_jvms(entry_jvms);
854 set_default_node_notes(entry_nn);
855 }
856 PhaseGVN& gvn = *initial_gvn();
857 uint i = 0;
858 int arg_num = 0;
859 for (uint j = 0; i < (uint)arg_size; i++) {
860 const Type* t = tf->domain_sig()->field_at(i);
861 Node* parm = NULL;
862 if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
863 // Inline type arguments are not passed by reference: we get an argument per
864 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
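// Sketch (assumed example): for a scalarized parameter of an inline type with
// fields (int x, long y), the calling convention delivers x and y as separate
// incoming values (plus a null marker when the parameter can be null) instead
// of a single oop; make_from_multi() reassembles them into one InlineTypeNode.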
865 GraphKit kit(jvms, &gvn);
866 kit.set_control(map->control());
867 Node* old_mem = map->memory();
868 // Use immutable memory for inline type loads and restore it below
869 kit.set_all_memory(C->immutable_memory());
870 parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
871 map->set_control(kit.control());
872 map->set_memory(old_mem);
873 } else {
874 parm = gvn.transform(new ParmNode(start, j++));
875 }
876 map->init_req(i, parm);
877 // Record all these guys for later GVN.
878 record_for_igvn(parm);
879 if (i >= TypeFunc::Parms && t != Type::HALF) {
880 arg_num++;
881 }
882 }
883 for (; i < map->req(); i++) {
884 map->init_req(i, top());
885 }
886 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
887 set_default_node_notes(old_nn);
888 return jvms;
889 }
890
891 //-----------------------------make_node_notes---------------------------------
892 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
893 if (caller_nn == NULL) return NULL;
894 Node_Notes* nn = caller_nn->clone(C);
895 JVMState* caller_jvms = nn->jvms();
896 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
897 jvms->set_offsets(0);
898 jvms->set_bci(_entry_bci);
899 nn->set_jvms(jvms);
900 return nn;
901 }
902
903
904 //--------------------------return_values--------------------------------------
905 void Compile::return_values(JVMState* jvms) {
906 GraphKit kit(jvms);
907 Node* ret = new ReturnNode(TypeFunc::Parms,
908 kit.control(),
909 kit.i_o(),
910 kit.reset_memory(),
911 kit.frameptr(),
912 kit.returnadr());
913 // Add zero or one return value
914 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
915 if (ret_size > 0) {
916 kit.inc_sp(-ret_size); // pop the return value(s)
917 kit.sync_jvms();
918 Node* res = kit.argument(0);
919 if (tf()->returns_inline_type_as_fields()) {
920 // Multiple return values (inline type fields): add as many edges
921 // to the Return node as returned values.
922 InlineTypeNode* vt = res->as_InlineType();
923 ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
924 if (vt->is_allocated(&kit.gvn()) && !StressInlineTypeReturnedAsFields) {
925 ret->init_req(TypeFunc::Parms, vt->get_oop());
926 } else {
927 // Return the tagged klass pointer to signal scalarization to the caller
928 Node* tagged_klass = vt->tagged_klass(kit.gvn());
929 if (!method()->signature()->returns_null_free_inline_type()) {
930 // Return null if the inline type is null (IsInit field is not set)
931 Node* conv = kit.gvn().transform(new ConvI2LNode(vt->get_is_init()));
932 Node* shl = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
933 Node* shr = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
934 tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
935 }
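// I.e. mask = ((jlong)is_init << 63) >> 63 is zero when IsInit == 0 and all
// ones otherwise, so a null inline type returns 0 instead of a tagged klass
// pointer.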
936 ret->init_req(TypeFunc::Parms, tagged_klass);
937 }
938 uint idx = TypeFunc::Parms + 1;
939 vt->pass_fields(&kit, ret, idx, false, method()->signature()->returns_null_free_inline_type());
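// pass_fields() fills in one return edge per scalarized field, starting at
// TypeFunc::Parms+1, so the Return node ends up with as many value edges as
// the extended calling convention (range_cc) reserved above.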
940 } else {
941 ret->add_req(res);
942 // Note: The second dummy edge is not needed by a ReturnNode.
943 }
944 }
945 // bind it to root
946 root()->add_req(ret);
947 record_for_igvn(ret);
948 initial_gvn()->transform_no_reclaim(ret);
949 }
950
951 //------------------------rethrow_exceptions-----------------------------------
952 // Bind all exception states in the list into a single RethrowNode.
953 void Compile::rethrow_exceptions(JVMState* jvms) {
954 GraphKit kit(jvms);
955 if (!kit.has_exceptions()) return; // nothing to generate
956 // Load my combined exception state into the kit, with all phis transformed:
957 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
958 Node* ex_oop = kit.use_exception_state(ex_map);
959 RethrowNode* exit = new RethrowNode(kit.control(),
960 kit.i_o(), kit.reset_memory(),
961 kit.frameptr(), kit.returnadr(),
962 // like a return but with exception input
963 ex_oop);
1047 // to complete, we force all writes to complete.
1048 //
1049 // 2. Experimental VM option is used to force the barrier if any field
1050 // was written out in the constructor.
1051 //
1052 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1053 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1054 // MemBarVolatile is used before volatile load instead of after volatile
1055 // store, so there's no barrier after the store.
1056 // We want to guarantee the same behavior as on platforms with total store
1057 // order, although this is not required by the Java memory model.
1058 // In this case, we want to enforce visibility of volatile field
1059 // initializations which are performed in constructors.
1060 // So as with finals, we add a barrier here.
1061 //
1062 // "All bets are off" unless the first publication occurs after a
1063 // normal return from the constructor. We do not attempt to detect
1064 // such unusual early publications. But no barrier is needed on
1065 // exceptional returns, since they cannot publish normally.
1066 //
1067 if (method()->is_object_constructor_or_class_initializer() &&
1068 (wrote_final() ||
1069 (AlwaysSafeConstructors && wrote_fields()) ||
1070 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1071 _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1072
1073 // If a memory barrier was created for a final field write
1074 // and the allocation node does not escape the initializing method,
1075 // then the barrier introduced by the allocation node can be removed.
1076 if (DoEscapeAnalysis && alloc_with_final()) {
1077 AllocateNode *alloc = AllocateNode::Ideal_allocation(alloc_with_final(), &_gvn);
1078 alloc->compute_MemBar_redundancy(method());
1079 }
1080 if (PrintOpto && (Verbose || WizardMode)) {
1081 method()->print_name();
1082 tty->print_cr(" writes finals and needs a memory barrier");
1083 }
1084 }
1085
1086 // Any method can write a @Stable field; insert memory barriers
1087 // after those also. We can't bind the predecessor allocation node (if any)
1088 // to the barrier because the allocation doesn't always dominate the
1089 // MemBarRelease.
1090 if (wrote_stable()) {
1091 _exits.insert_mem_bar(Op_MemBarRelease);
1092 if (PrintOpto && (Verbose || WizardMode)) {
1093 method()->print_name();
1094 tty->print_cr(" writes @Stable and needs a memory barrier");
1095 }
1096 }
1097
1098 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1099 // transform each slice of the original memphi:
1100 mms.set_memory(_gvn.transform(mms.memory()));
1101 }
1102 // Clean up input MergeMems created by transforming the slices
1103 _gvn.transform(_exits.merged_memory());
1104
1105 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1106 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1107 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1108 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1109 // If the type we set for the ret_phi in build_exits() is too optimistic and
1110 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1111 // loading. It could also be due to an error, so mark this method as not compilable because
1112 // otherwise this could lead to an infinite compile loop.
1113 // In any case, this code path is rarely (and never in my testing) reached.
1114 C->record_method_not_compilable("Can't determine return type.");
1115 return;
1116 }
1117 if (ret_type->isa_int()) {
1118 BasicType ret_bt = method()->return_type()->basic_type();
1119 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1120 }
1121 _exits.push_node(ret_type->basic_type(), ret_phi);
1122 }
1123
1124 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1125
1126 // Unlock along the exceptional paths.
1179 }
1180
1181 //-----------------------------create_entry_map-------------------------------
1182 // Initialize our parser map to contain the types at method entry.
1183 // For OSR, the map contains a single RawPtr parameter.
1184 // Initial monitor locking for sync. methods is performed by do_method_entry.
1185 SafePointNode* Parse::create_entry_map() {
1186 // Check for really stupid bail-out cases.
1187 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1188 if (len >= 32760) {
1189 C->record_method_not_compilable("too many local variables");
1190 return NULL;
1191 }
1192
1193 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1194 _caller->map()->delete_replaced_nodes();
1195
1196 // If this is an inlined method, we may have to do a receiver null check.
1197 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1198 GraphKit kit(_caller);
1199 kit.null_check_receiver_before_call(method(), false);
1200 _caller = kit.transfer_exceptions_into_jvms();
1201 if (kit.stopped()) {
1202 _exits.add_exception_states_from(_caller);
1203 _exits.set_jvms(_caller);
1204 return NULL;
1205 }
1206 }
1207
1208 assert(method() != NULL, "parser must have a method");
1209
1210 // Create an initial safepoint to hold JVM state during parsing
1211 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
1212 set_map(new SafePointNode(len, jvms));
1213 jvms->set_map(map());
1214 record_for_igvn(map());
1215 assert(jvms->endoff() == len, "correct jvms sizing");
1216
1217 SafePointNode* inmap = _caller->map();
1218 assert(inmap != NULL, "must have inmap");
1219 // In case of null check on receiver above
1220 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1221
1222 uint i;
1223
1224 // Pass thru the predefined input parameters.
1225 for (i = 0; i < TypeFunc::Parms; i++) {
1226 map()->init_req(i, inmap->in(i));
1227 }
1228
1229 if (depth() == 1) {
1230 assert(map()->memory()->Opcode() == Op_Parm, "");
1231 // Insert the memory aliasing node
1232 set_all_memory(reset_memory());
1233 }
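// At the root of the compilation the flat memory Parm is wrapped in a MergeMem
// (set_all_memory of reset_memory), so subsequent parsing can track memory
// state per alias slice.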
1234 assert(merged_memory(), "");
1235
1236 // Now add the locals which are initially bound to arguments:
1237 uint arg_size = tf()->domain_sig()->cnt();
1238 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1239 for (i = TypeFunc::Parms; i < arg_size; i++) {
1240 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1241 }
1242
1243 // Clear out the rest of the map (locals and stack)
1244 for (i = arg_size; i < len; i++) {
1245 map()->init_req(i, top());
1246 }
1247
1248 SafePointNode* entry_map = stop();
1249 return entry_map;
1250 }
1251
1252 //-----------------------------do_method_entry--------------------------------
1253 // Emit any code needed in the pseudo-block before BCI zero.
1254 // The main thing to do is lock the receiver of a synchronized method.
1255 void Parse::do_method_entry() {
1256 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1257 set_sp(0); // Java Stack Pointer
1291
1292 // If the method is synchronized, we need to construct a lock node, attach
1293 // it to the Start node, and pin it there.
1294 if (method()->is_synchronized()) {
1295 // Insert a FastLockNode right after the Start which takes as arguments
1296 // the current thread pointer, the "this" pointer & the address of the
1297 // stack slot pair used for the lock. The "this" pointer is a projection
1298 // off the start node, but the locking spot has to be constructed by
1299 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1300 // becomes the second argument to the FastLockNode call. The
1301 // FastLockNode becomes the new control parent to pin it to the start.
1302
1303 // Setup Object Pointer
1304 Node *lock_obj = NULL;
1305 if (method()->is_static()) {
1306 ciInstance* mirror = _method->holder()->java_mirror();
1307 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1308 lock_obj = makecon(t_lock);
1309 } else { // Else pass the "this" pointer,
1310 lock_obj = local(0); // which is Parm0 from StartNode
1311 assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1312 }
1313 // Clear out dead values from the debug info.
1314 kill_dead_locals();
1315 // Build the FastLockNode
1316 _synch_lock = shared_lock(lock_obj);
1317 }
1318
1319 // Feed profiling data for parameters to the type system so it can
1320 // propagate it as speculative types
1321 record_profiled_parameters_for_speculation();
1322 }
1323
1324 //------------------------------init_blocks------------------------------------
1325 // Create the parser's basic blocks and initialize them from the ciTypeFlow info.
1326 void Parse::init_blocks() {
1327 // Create the blocks.
1328 _block_count = flow()->block_count();
1329 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1330
1331 // Initialize the structs.
1703 //--------------------handle_missing_successor---------------------------------
1704 void Parse::handle_missing_successor(int target_bci) {
1705 #ifndef PRODUCT
1706 Block* b = block();
1707 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1708 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1709 #endif
1710 ShouldNotReachHere();
1711 }
1712
1713 //--------------------------merge_common---------------------------------------
1714 void Parse::merge_common(Parse::Block* target, int pnum) {
1715 if (TraceOptoParse) {
1716 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1717 }
1718
1719 // Zap extra stack slots to top
1720 assert(sp() == target->start_sp(), "");
1721 clean_stack(sp());
1722
1723 // Check for merge conflicts involving inline types
1724 JVMState* old_jvms = map()->jvms();
1725 int old_bci = bci();
1726 JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1727 tmp_jvms->set_should_reexecute(true);
1728 tmp_jvms->bind_map(map());
1729 // Execution needs to restart at the next bytecode (the entry of the
1730 // next block).
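// Binding a JVMState with should_reexecute set means that, if buffering or
// scalarizing a value below triggers an allocation that deoptimizes, execution
// resumes at the target block's entry rather than in the middle of the merge.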
1731 if (target->is_merged() ||
1732 pnum > PhiNode::Input ||
1733 target->is_handler() ||
1734 target->is_loop_head()) {
1735 set_parse_bci(target->start());
1736 for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1737 Node* n = map()->in(j); // Incoming change to target state.
1738 const Type* t = NULL;
1739 if (tmp_jvms->is_loc(j)) {
1740 t = target->local_type_at(j - tmp_jvms->locoff());
1741 } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1742 t = target->stack_type_at(j - tmp_jvms->stkoff());
1743 }
1744 if (t != NULL && t != Type::BOTTOM) {
1745 if (n->is_InlineType() && !t->is_inlinetypeptr()) {
1746 // Allocate inline type in src block to be able to merge it with oop in target block
1747 map()->set_req(j, n->as_InlineType()->buffer(this));
1748 } else if (!n->is_InlineType() && t->is_inlinetypeptr()) {
1749 // Scalarize null in src block to be able to merge it with inline type in target block
1750 assert(gvn().type(n)->is_zero_type(), "Should have been scalarized");
1751 map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
1752 }
1753 }
1754 }
1755 }
1756 old_jvms->bind_map(map());
1757 set_parse_bci(old_bci);
1758
1759 if (!target->is_merged()) { // No prior mapping at this bci
1760 if (TraceOptoParse) { tty->print(" with empty state"); }
1761
1762 // If this path is dead, do not bother capturing it as a merge.
1763 // It is "as if" we had one fewer predecessor from the beginning.
1764 if (stopped()) {
1765 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1766 return;
1767 }
1768
1769 // Make a region if we know there are multiple or unpredictable inputs.
1770 // (Also, if this is a plain fall-through, we might see another region,
1771 // which must not be allowed into this block's map.)
1772 if (pnum > PhiNode::Input // Known multiple inputs.
1773 || target->is_handler() // These have unpredictable inputs.
1774 || target->is_loop_head() // Known multiple inputs
1775 || control()->is_Region()) { // We must hide this guy.
1776
1777 int current_bci = bci();
1778 set_parse_bci(target->start()); // Set target bci
1792 gvn().set_type(r, Type::CONTROL);
1793 record_for_igvn(r);
1794 // zap all inputs to NULL for debugging (done in Node(uint) constructor)
1795 // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
1796 r->init_req(pnum, control());
1797 set_control(r);
1798 set_parse_bci(current_bci); // Restore bci
1799 }
1800
1801 // Convert the existing Parser mapping into a mapping at this bci.
1802 store_state_to(target);
1803 assert(target->is_merged(), "do not come here twice");
1804
1805 } else { // Prior mapping at this bci
1806 if (TraceOptoParse) { tty->print(" with previous state"); }
1807 #ifdef ASSERT
1808 if (target->is_SEL_head()) {
1809 target->mark_merged_backedge(block());
1810 }
1811 #endif
1812
1813 // We must not manufacture more phis if the target is already parsed.
1814 bool nophi = target->is_parsed();
1815
1816 SafePointNode* newin = map();// Hang on to incoming mapping
1817 Block* save_block = block(); // Hang on to incoming block;
1818 load_state_from(target); // Get prior mapping
1819
1820 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1821 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1822 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1823 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1824
1825 // Iterate over my current mapping and the old mapping.
1826 // Where different, insert Phi functions.
1827 // Use any existing Phi functions.
1828 assert(control()->is_Region(), "must be merging to a region");
1829 RegionNode* r = control()->as_Region();
1830
1831 // Compute where to merge into
1832 // Merge incoming control path
1833 r->init_req(pnum, newin->control());
1834
1835 if (pnum == 1) { // Last merge for this Region?
1836 if (!block()->flow()->is_irreducible_entry()) {
1837 Node* result = _gvn.transform_no_reclaim(r);
1838 if (r != result && TraceOptoParse) {
1839 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1840 }
1841 }
1842 record_for_igvn(r);
1843 }
1844
1845 // Update all the non-control inputs to map:
1846 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1847 bool check_elide_phi = target->is_SEL_backedge(save_block);
1848 bool last_merge = (pnum == PhiNode::Input);
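// pnum == PhiNode::Input (1) is treated as the last merge into this region
// (cf. the pnum == 1 check above); only then are the phis transformed and
// recorded for IGVN.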
1849 for (uint j = 1; j < newin->req(); j++) {
1850 Node* m = map()->in(j); // Current state of target.
1851 Node* n = newin->in(j); // Incoming change to target state.
1852 PhiNode* phi;
1853 if (m->is_Phi() && m->as_Phi()->region() == r) {
1854 phi = m->as_Phi();
1855 } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1856 phi = m->as_InlineType()->get_oop()->as_Phi();
1857 } else {
1858 phi = NULL;
1859 }
1860 if (m != n) { // Different; must merge
1861 switch (j) {
1862 // Frame pointer and Return Address never change
1863 case TypeFunc::FramePtr:// Drop m, use the original value
1864 case TypeFunc::ReturnAdr:
1865 break;
1866 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1867 assert(phi == NULL, "the merge contains phis, not vice versa");
1868 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1869 continue;
1870 default: // All normal stuff
1871 if (phi == NULL) {
1872 const JVMState* jvms = map()->jvms();
1873 if (EliminateNestedLocks &&
1874 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1875 // BoxLock nodes are never commoned.
1876 // Use old BoxLock node as merged box.
1877 assert(newin->jvms()->is_monitor_box(j), "sanity");
1878 // This assert also tests that nodes are BoxLock.
1879 assert(BoxLockNode::same_slot(n, m), "sanity");
1880 C->gvn_replace_by(n, m);
1881 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1882 phi = ensure_phi(j, nophi);
1883 }
1884 }
1885 break;
1886 }
1887 }
1888 // At this point, n might be top if:
1889 // - there is no phi (because TypeFlow detected a conflict), or
1890 // - the corresponding control edge is top (a dead incoming path)
1891 // It is a bug if we create a phi which sees a garbage value on a live path.
1892
1893 // Merging two inline types?
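// Each field of an inline type has its own phi (created by clone_with_phis()
// in ensure_phi()); merge_with() below fills in this path's input for every
// field phi.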
1894 if (phi != NULL && phi->bottom_type()->is_inlinetypeptr()) {
1895 // Reload current state because it may have been updated by ensure_phi
1896 m = map()->in(j);
1897 InlineTypeNode* vtm = m->as_InlineType(); // Current inline type
1898 InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
1899 assert(vtm->get_oop() == phi, "Inline type should have Phi input");
1900 if (TraceOptoParse) {
1901 #ifdef ASSERT
1902 tty->print_cr("\nMerging inline types");
1903 tty->print_cr("Current:");
1904 vtm->dump(2);
1905 tty->print_cr("Incoming:");
1906 vtn->dump(2);
1907 tty->cr();
1908 #endif
1909 }
1910 // Do the merge
1911 vtm->merge_with(&_gvn, vtn, pnum, last_merge);
1912 if (last_merge) {
1913 map()->set_req(j, _gvn.transform_no_reclaim(vtm));
1914 record_for_igvn(vtm);
1915 }
1916 } else if (phi != NULL) {
1917 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1918 assert(phi->region() == r, "");
1919 phi->set_req(pnum, n); // Then add 'n' to the merge
1920 if (last_merge) {
1921 // Last merge for this Phi.
1922 // So far, Phis have had a reasonable type from ciTypeFlow.
1923 // Now _gvn will join that with the meet of current inputs.
1924 // BOTTOM is never permissible here because, pessimistically,
1925 // Phis of pointers cannot lose the basic pointer type.
1926 debug_only(const Type* bt1 = phi->bottom_type());
1927 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1928 map()->set_req(j, _gvn.transform_no_reclaim(phi));
1929 debug_only(const Type* bt2 = phi->bottom_type());
1930 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1931 record_for_igvn(phi);
1932 }
1933 }
1934 } // End of for all values to be merged
1935
1936 if (last_merge && !r->in(0)) { // The occasional useless Region
1937 assert(control() == r, "");
1938 set_control(r->nonnull_req());
1939 }
1940
1941 map()->merge_replaced_nodes_with(newin);
1942
1943 // newin has been subsumed into the lazy merge, and is now dead.
1944 set_block(save_block);
1945
1946 stop(); // done with this guy, for now
1947 }
1948
1949 if (TraceOptoParse) {
1950 tty->print_cr(" on path %d", pnum);
1951 }
1952
1953 // Done with this parser state.
1954 assert(stopped(), "");
1955 }
1956
2068
2069 // Add new path to the region.
2070 uint pnum = r->req();
2071 r->add_req(NULL);
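// Grow the region by one input (left NULL for now); every phi hanging off this
// region, including the phis inside InlineTypeNodes, is grown to match below.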
2072
2073 for (uint i = 1; i < map->req(); i++) {
2074 Node* n = map->in(i);
2075 if (i == TypeFunc::Memory) {
2076 // Ensure a phi on all currently known memories.
2077 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2078 Node* phi = mms.memory();
2079 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2080 assert(phi->req() == pnum, "must be same size as region");
2081 phi->add_req(NULL);
2082 }
2083 }
2084 } else {
2085 if (n->is_Phi() && n->as_Phi()->region() == r) {
2086 assert(n->req() == pnum, "must be same size as region");
2087 n->add_req(NULL);
2088 } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2089 n->as_InlineType()->add_new_path(r);
2090 }
2091 }
2092 }
2093
2094 return pnum;
2095 }
2096
2097 //------------------------------ensure_phi-------------------------------------
2098 // Turn the idx'th entry of the current map into a Phi
2099 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2100 SafePointNode* map = this->map();
2101 Node* region = map->control();
2102 assert(region->is_Region(), "");
2103
2104 Node* o = map->in(idx);
2105 assert(o != NULL, "");
2106
2107 if (o == top()) return NULL; // TOP always merges into TOP
2108
2109 if (o->is_Phi() && o->as_Phi()->region() == region) {
2110 return o->as_Phi();
2111 }
2112 InlineTypeNode* vt = o->isa_InlineType();
2113 if (vt != NULL && vt->has_phi_inputs(region)) {
2114 return vt->get_oop()->as_Phi();
2115 }
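// An InlineTypeNode whose inputs are already phis on this region acts as the
// scalarized form of a phi; its oop phi stands in for the merged value here.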
2116
2117 // Now use a Phi here for merging
2118 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2119 const JVMState* jvms = map->jvms();
2120 const Type* t = NULL;
2121 if (jvms->is_loc(idx)) {
2122 t = block()->local_type_at(idx - jvms->locoff());
2123 } else if (jvms->is_stk(idx)) {
2124 t = block()->stack_type_at(idx - jvms->stkoff());
2125 } else if (jvms->is_mon(idx)) {
2126 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2127 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2128 } else if ((uint)idx < TypeFunc::Parms) {
2129 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2130 } else {
2131 assert(false, "no type information for this phi");
2132 }
2133
2134 // If the type falls to bottom, then this must be a local that
2135 // is already dead or is mixing ints and oops or some such.
2136 // Forcing it to top makes it go dead.
2137 if (t == Type::BOTTOM) {
2138 map->set_req(idx, top());
2139 return NULL;
2140 }
2141
2142 // Do not create phis for top either.
2143 // A top on a non-null control-flow path must be an unused value, even after the phi.
2144 if (t == Type::TOP || t == Type::HALF) {
2145 map->set_req(idx, top());
2146 return NULL;
2147 }
2148
2149 if (vt != NULL && t->is_inlinetypeptr()) {
2150 // Inline types are merged by merging their field values.
2151 // Create a cloned InlineTypeNode with phi inputs that
2152 // represents the merged inline type and update the map.
2153 vt = vt->clone_with_phis(&_gvn, region);
2154 map->set_req(idx, vt);
2155 return vt->get_oop()->as_Phi();
2156 } else {
2157 PhiNode* phi = PhiNode::make(region, o, t);
2158 gvn().set_type(phi, t);
2159 if (C->do_escape_analysis()) record_for_igvn(phi);
2160 map->set_req(idx, phi);
2161 return phi;
2162 }
2163 }
2164
2165 //--------------------------ensure_memory_phi----------------------------------
2166 // Turn the idx'th slice of the current memory into a Phi
2167 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2168 MergeMemNode* mem = merged_memory();
2169 Node* region = control();
2170 assert(region->is_Region(), "");
2171
2172 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2173 assert(o != NULL && o != top(), "");
2174
2175 PhiNode* phi;
2176 if (o->is_Phi() && o->as_Phi()->region() == region) {
2177 phi = o->as_Phi();
2178 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2179 // clone the shared base memory phi to make a new memory split
2180 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2181 const Type* t = phi->bottom_type();
2182 const TypePtr* adr_type = C->get_adr_type(idx);
2310 Node* chk = _gvn.transform( new CmpINode(opq, profile_state) );
2311 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
2312 // Branch to failure if state was changed
2313 { BuildCutout unless(this, tst, PROB_ALWAYS);
2314 uncommon_trap(Deoptimization::Reason_rtm_state_change,
2315 Deoptimization::Action_make_not_entrant);
2316 }
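// If the RTM state observed at compile time has changed at run time, the trap
// makes this nmethod not entrant so it can later be recompiled for the new state.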
2317 }
2318 #endif
2319 }
2320
2321 //------------------------------return_current---------------------------------
2322 // Append current _map to _exit_return
2323 void Parse::return_current(Node* value) {
2324 if (RegisterFinalizersAtInit &&
2325 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2326 call_register_finalizer();
2327 }
2328
2329 // Do not set_parse_bci, so that return goo is credited to the return insn.
2330 // vreturn can trigger an allocation so vreturn can throw. Setting
2331 // the bci here breaks exception handling. Commenting this out
2332 // doesn't seem to break anything.
2333 // set_bci(InvocationEntryBci);
2334 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2335 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2336 }
2337 if (C->env()->dtrace_method_probes()) {
2338 make_dtrace_method_exit(method());
2339 }
2340 // frame pointer is always same, already captured
2341 if (value != NULL) {
2342 Node* phi = _exits.argument(0);
2343 const Type* return_type = phi->bottom_type();
2344 const TypeInstPtr* tr = return_type->isa_instptr();
2345 if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2346 return_type->is_inlinetypeptr()) {
2347 // Inline type is returned as fields, make sure it is scalarized
2348 if (!value->is_InlineType()) {
2349 value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass(), method()->signature()->returns_null_free_inline_type());
2350 }
2351 if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2352 // Returning from root or an incrementally inlined method. Make sure all non-flattened
2353 // fields are buffered and re-execute if allocation triggers deoptimization.
2354 PreserveReexecuteState preexecs(this);
2355 assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2356 jvms()->set_should_reexecute(true);
2357 inc_sp(1);
2358 value = value->as_InlineType()->allocate_fields(this);
2359 }
2360 } else if (value->is_InlineType()) {
2361 // Inline type is returned as oop, make sure it is buffered and re-execute
2362 // if allocation triggers deoptimization.
2363 PreserveReexecuteState preexecs(this);
2364 jvms()->set_should_reexecute(true);
2365 inc_sp(1);
2366 value = value->as_InlineType()->buffer(this);
2367 }
2368 // ...else
2369 // If returning oops to an interface-return, there is a silent free
2370 // cast from oop to interface allowed by the Verifier. Make it explicit here.
2371 phi->add_req(value);
2372 }
2373
2374 SafePointNode* exit_return = _exits.map();
2375 exit_return->in( TypeFunc::Control )->add_req( control() );
2376 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2377 Node *mem = exit_return->in( TypeFunc::Memory );
2378 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2379 if (mms.is_empty()) {
2380 // get a copy of the base memory, and patch just this one input
2381 const TypePtr* adr_type = mms.adr_type(C);
2382 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2383 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2384 gvn().set_type_bottom(phi);
2385 phi->del_req(phi->req()-1); // prepare to re-patch
2386 mms.set_memory(phi);
2387 }
2388 mms.memory()->add_req(mms.memory2());
2389 }
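// Every return path adds one input to each memory phi in the exit map; a slice
// still lazily sharing the base memory is first split off with slice_memory()
// so that only this slice gets patched.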
2390
2391 if (_first_return) {
2392 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2393 _first_return = false;
2394 } else {
2395 _exits.map()->merge_replaced_nodes_with(map());
2396 }
2397
2398 stop_and_kill_map(); // This CFG path dies here
2399 }
2400
2401
2402 //------------------------------add_safepoint----------------------------------
2403 void Parse::add_safepoint() {
2404 uint parms = TypeFunc::Parms+1;
2405
2406 // Clear out dead values from the debug info.
2407 kill_dead_locals();
2408
2409 // Clone the JVM State
2410 SafePointNode *sfpnt = new SafePointNode(parms, NULL);