/*
 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_GRAPHKIT_HPP
#define SHARE_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class BarrierSetC2;
class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method
  BarrierSetC2*     _barrier_set;

 private:
  int               _sp;        // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode*     map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse()          const { return NULL; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

  ciEnv*        env()               const { return _env; }
  PhaseGVN&     gvn()               const { return _gvn; }
  void*         barrier_set_state() const { return C->barrier_set_state(); }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*         null()          const { return zerocon(T_OBJECT); }
  Node*         top()           const { return C->top(); }
  RootNode*     root()          const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)        const { return _gvn.intcon(con); }
  Node* longcon(jlong con)      const { return _gvn.longcon(con); }
  Node* integercon(jlong con, BasicType bt)   const {
    if (bt == T_INT) {
      return intcon(checked_cast<jint>(con));
    }
    assert(bt == T_LONG, "basic type not an int or long");
    return longcon(con);
  }
  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)
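
  // A hedged usage sketch (the node names below are illustrative, not from
  // this file): building and querying constants while expanding an intrinsic.
  //
  //   Node* four = intcon(4);                      // shared ConI node
  //   Node* big  = longcon(1L << 40);              // shared ConL node
  //   Node* nul  = zerocon(T_OBJECT);              // same node as null()
  //   jint  len  = find_int_con(length_node, -1);  // -1 if 'length_node' is not a constant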

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by the StartNode::enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*     map()      const { return _map; }
  bool               has_exceptions() const { return _exceptions != NULL; }
  JVMState*          jvms()     const { return map_not_null()->_jvms; }
  int                sp()       const { return _sp; }
  int                bci()      const { return _bci; }
  Bytecodes::Code    java_bc()  const;
  ciMethod*          method()   const { return _method; }

  void set_jvms(JVMState* jvms)       { set_map(jvms->map());
                                        assert(jvms == this->jvms(), "sanity");
                                        _sp = jvms->sp();
                                        _bci = jvms->bci();
                                        _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m)      { _map = m; debug_only(verify_map()); }
  void set_sp(int sp)                 { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)                  { set_sp(sp() + i); }
  void dec_sp(int i)                  { set_sp(sp() - i); }
  void set_bci(int bci)               { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Tell whether an exception oop is currently saved in the given map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL)  return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, and return the combined states.
  JVMState* transfer_exceptions_into_jvms();
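
  // A hedged sketch of the usual bookkeeping around a potentially throwing
  // call ('callee_jvms' is an illustrative name, not from this file):
  //
  //   add_exception_states_from(callee_jvms);  // collect states where the callee throws
  //   ...                                      // keep building the normal path
  //   JVMState* ex_jvms = transfer_exceptions_into_jvms();  // drain before the kit goes away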

  // Helper to throw a built-in exception.
  // The JVMS must allow the bytecode to be re-executed via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason);

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
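
  // A hedged sketch ('obj' and 'field_offset' are illustrative): computing
  // addresses for a header word and an ordinary field before a load/store.
  //
  //   Node* mark_adr  = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  //   Node* field_adr = basic_plus_adr(obj, obj, MakeConX(field_offset));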


  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new IfFalseNode(iff));     }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new DivINode(ctl, l, r));  }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new XorINode(l, r));       }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new MinINode(l, r));       }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new URShiftINode(l, r));   }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new AddPNode(b, a, o));    }
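
  // A hedged sketch: these shortcuts keep intrinsic code readable, e.g.
  // computing mid = (lo + hi) >>> 1 and a bounds test ('lo', 'hi', 'len'
  // are illustrative nodes):
  //
  //   Node* mid  = URShiftI(AddI(lo, hi), intcon(1));
  //   Node* test = Bool(CmpI(mid, len), BoolTest::lt);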

  // Convert between int and long, and size_t.
  // (See macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvI2UL(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
  // Cast array allocation's length as narrow as possible.
  // If replace_length_in_map is true, replace length with CastIINode in map.
  // This method is invoked after creating/moving an AllocateArrayNode, or in load_array_length.
  Node* array_ideal_length(AllocateArrayNode* alloc,
                           const TypeOopPtr* oop_type,
                           bool replace_length_in_map);


  // Helper function to do a NULL pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false,
                          Node* *null_control = NULL,
                          bool speculative = false);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_LONG);
  }
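
  // A hedged sketch: guarding an integer division, as the idiv bytecode
  // requires ('num' and 'den' are illustrative nodes):
  //
  //   Node* den_nz = zero_check_int(den);           // throws on a zero divisor
  //   Node* q      = DivI(control(), num, den_nz);
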
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true, NULL, _gvn.type(value)->speculative_always_null());
  }

  // Check if value is null and abort if it is
  Node* must_be_not_null(Node* value, bool do_replace_in_map);

  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map)
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false,
                       bool safe_for_replace = false,
                       bool speculative = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);

  void guard_klass_being_initialized(Node* klass);
  void guard_init_thread(Node* klass);

  void clinit_barrier(ciInstanceKlass* ik, ciMethod* context);

  // Check for unique class for receiver at call
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 &&         // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return NULL;
  }

  // record type from profiling with the type system
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();
  void record_profiled_return_for_speculation();
  Node* record_profiled_receiver_for_speculation(Node* n);

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     const TypeKlassPtr* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);

  // Cast obj to type and emit guard unless we had too many traps here already
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void  push(Node* n)     { map_not_null();        _map->set_stack(_map->_jvms,   _sp++        , n); }
  Node* pop()             { map_not_null(); return _map->stack(    _map->_jvms, --_sp             ); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(    _map->_jvms,   _sp - off - 1   ); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(  local(i+0) );  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }

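  // A hedged sketch: two-word values (longs/doubles) occupy two stack slots,
  // with top() as the placeholder half ('lval' is an illustrative node):
  //
  //   push_pair(lval);           // pushes the value, then top()
  //   Node* lval2 = pop_pair();  // pops the placeholder, then the value
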
  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(      n );  // T_INT, ...
    else if (n_size == 2)  push_pair( n );  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }

  Node* control()               const { return map_not_null()->control(); }
  Node* i_o()                   const { return map_not_null()->i_o(); }
  Node* returnadr()             const { return map_not_null()->returnadr(); }
  Node* frameptr()              const { return map_not_null()->frameptr(); }
  Node* local(uint idx)         const { map_not_null(); return _map->local(      _map->_jvms, idx); }
  Node* stack(uint idx)         const { map_not_null(); return _map->stack(      _map->_jvms, idx); }
  Node* argument(uint idx)      const { map_not_null(); return _map->argument(   _map->_jvms, idx); }
  Node* monitor_box(uint idx)   const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx)   const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control  (Node* c)         { map_not_null()->set_control(c); }
  void set_i_o      (Node* c)         { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)   { map_not_null(); _map->set_local(   _map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)   { map_not_null(); _map->set_stack(   _map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c){ map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)    { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c,C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr) { set_memory(c,_gvn.type(adr)->is_ptr()); }
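
  // A hedged sketch: when a memory-producing node is wired up by hand, the
  // relevant slice is fetched first and republished afterwards ('st' stands
  // for a freshly built store node, illustrative only):
  //
  //   const TypePtr* adr_type = ...;             // alias class of the access
  //   Node* mem = memory(adr_type);              // current memory for that slice
  //   ... build 'st' with 'mem' as its memory input ...
  //   set_memory(_gvn.transform(st), adr_type);  // publish the updated slice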

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note:  require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched, unsafe, barrier_data);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched, unsafe, barrier_data);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false,
                        int barrier_data = 0) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           mo, require_atomic_access,
                           unaligned, mismatched, unsafe,
                           barrier_data);
  }
  // This is the base version which is given alias index
  // Return the new StoreXNode
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false,
                        int barrier_data = 0);
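
  // A hedged sketch: an unordered int load and store through the same
  // address ('adr' is an illustrative address node):
  //
  //   const TypePtr* adr_type = adr->bottom_type()->is_ptr();
  //   Node* v = make_load(control(), adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  //   store_to_memory(control(), adr, AddI(v, intcon(1)), T_INT, adr_type, MemNode::unordered);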

  // Perform decorated accesses

  Node* access_store_at(Node* obj,   // containing obj
                        Node* adr,   // actual address to store val at
                        const TypePtr* adr_type,
                        Node* val,
                        const Type* val_type,
                        BasicType bt,
                        DecoratorSet decorators);

  Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val at
                       const TypePtr* adr_type,
                       const Type* val_type,
                       BasicType bt,
                       DecoratorSet decorators);

  Node* access_load(Node* adr,   // actual address to load val at
                    const Type* val_type,
                    BasicType bt,
                    DecoratorSet decorators);

  Node* access_atomic_cmpxchg_val_at(Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* expected_val,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators);

  Node* access_atomic_cmpxchg_bool_at(Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      Node* expected_val,
                                      Node* new_val,
                                      const Type* value_type,
                                      BasicType bt,
                                      DecoratorSet decorators);

  Node* access_atomic_xchg_at(Node* obj,
                              Node* adr,
                              const TypePtr* adr_type,
                              int alias_idx,
                              Node* new_val,
                              const Type* value_type,
                              BasicType bt,
                              DecoratorSet decorators);

  Node* access_atomic_add_at(Node* obj,
                             Node* adr,
                             const TypePtr* adr_type,
                             int alias_idx,
                             Node* new_val,
                             const Type* value_type,
                             BasicType bt,
                             DecoratorSet decorators);

  void access_clone(Node* src, Node* dst, Node* size, bool is_array);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl);
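
  // A hedged sketch: loading ary[idx] once a range check has established
  // that idx is in bounds ('ary' and 'idx' are illustrative nodes):
  //
  //   const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
  //   Node* elem_adr = array_element_address(ary, idx, T_INT, arytype->size(), control());
  //   Node* elem     = load_array_element(ary, idx, arytype, /*set_ctrl=*/true);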

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    // Callsite signature can be different from the actual method being called (e.g., _linkTo* sites).
    // Use callsite signature always.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void  set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false, bool deoptimize = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void  set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void  set_predefined_output_for_runtime_call(Node* call,
                                               Node* keep_mem,
                                               const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = NULL);

  // Replace the call with the current state of the kit.  Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);

  // helper functions for statistics
  void increment_counter(address counter_addr);   // increment a debug counter
  void increment_counter(Node*   counter_addr);   // increment a debug counter

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  Node* uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  Node* uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    return uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }

  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
  Node* uncommon_trap_exact(Deoptimization::DeoptReason reason,
                           Deoptimization::DeoptAction action,
                           ciKlass* klass = NULL, const char* reason_string = NULL,
                           bool must_throw = false) {
    return uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, /*keep_exact_action=*/true);
  }

  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  bool too_many_traps_or_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_traps_or_recompiles(method(), bci(), reason);
  }
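
  // A hedged sketch of the usual guard-or-give-up pattern:
  //
  //   if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
  //     // emit the optimistic check; the failing path deoptimizes
  //     uncommon_trap(Deoptimization::Reason_intrinsic,
  //                   Deoptimization::Action_maybe_recompile);
  //   } else {
  //     // fall back to a pessimistic, always-correct code shape
  //   }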

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

 public:
  // Helper function to round double arguments before a call
  void round_double_arguments(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);

  Node* sign_extend_byte(Node* in);
  Node* sign_extend_short(Node* in);

  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,               // CallLeafNoFPNode
    RC_NO_IO = 2,               // do not hook IO edges
    RC_NO_LEAF = 4,             // CallStaticJavaNode
    RC_MUST_THROW = 8,          // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,         // input memory is same as output
    RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
    RC_VECTOR = 64,             // CallLeafVectorNode
    RC_LEAF = 0                 // null value:  no flags set
  };
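
  // A hedged sketch of a leaf runtime call with two arguments (the entry
  // point, its TypeFunc and name below are hypothetical, not from HotSpot):
  //
  //   Node* result = make_runtime_call(RC_LEAF | RC_NO_FP,
  //                                    example_helper_Type(),                      // hypothetical TypeFunc
  //                                    CAST_FROM_FN_PTR(address, example_helper),  // hypothetical C function
  //                                    "example_helper",
  //                                    TypePtr::BOTTOM,                            // touches all of memory
  //                                    parm0, parm1);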

  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode
  Node* gen_checkcast( Node *subobj, Node* superkls,
                       Node* *failure_control = NULL );

  Node* gen_subtype_check(Node* obj, Node* superklass);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // Inexact type check used for predicted calls.
  Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                               Node** casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception=false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     Node* *return_size_val = NULL,
                     bool deoptimize_on_exception = false);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = NULL,
                  bool deoptimize_on_exception = false);

  // java.lang.String helpers
  Node* load_String_length(Node* str, bool set_ctrl);
  Node* load_String_value(Node* str, bool set_ctrl);
  Node* load_String_coder(Node* str, bool set_ctrl);
  void store_String_value(Node* str, Node* value);
  void store_String_coder(Node* str, Node* value);
  Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
  Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
  void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
  void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt);// New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);     // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt);// New IfNode's
    _gvn.transform(iff);                           // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);     // Range-check and Null-check removal is later
    return iff;
  }
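
  // A hedged sketch: a two-way diamond built from the helpers above
  // ('tst', 'a' and 'b' are illustrative nodes merged into one value):
  //
  //   IfNode* iff    = create_and_map_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
  //   Node* if_true  = IfTrue(iff);
  //   Node* if_false = IfFalse(iff);
  //   RegionNode* r  = new RegionNode(3);
  //   r->init_req(1, if_true);
  //   r->init_req(2, if_false);
  //   PhiNode* phi   = new PhiNode(r, TypeInt::INT);
  //   phi->init_req(1, a);
  //   phi->init_req(2, b);
  //   set_control(_gvn.transform(r));
  //   Node* merged   = _gvn.transform(phi);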

  void add_empty_predicates(int nargs = 0);
  void add_empty_predicate_impl(Deoptimization::DeoptReason reason, int nargs);

  Node* make_constant_from_field(ciField* field, Node* obj);

  // Vector API support (implemented in vectorIntrinsics.cpp)
  Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false);
  Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool shuffle_to_vector = false);
  Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned, and they are restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};

// Helper class to preserve the original _reexecute bit and _sp, and restore
// them on destruction.
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                 _kit;
  uint                      _sp;
  JVMState::ReexecuteState  _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_OPTO_GRAPHKIT_HPP