/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_GRAPHKIT_HPP
#define SHARE_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class BarrierSetC2;
class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*         _env;         // Compilation environment
  PhaseGVN&      _gvn;         // Some optimizations while parsing
  SafePointNode* _map;         // Parser map from JVM to Nodes
  SafePointNode* _exceptions;  // Parser map(s) for exception state(s)
  int            _bci;         // JVM Bytecode Pointer
  ciMethod*      _method;      // JVM Current Method
  BarrierSetC2*  _barrier_set;
#ifdef ASSERT
  uint           _worklist_size;
#endif

 private:
  int            _sp;          // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode* map_not_null() const {
    assert(_map != nullptr, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                                        // empty constructor
  GraphKit(JVMState* jvms, PhaseGVN* gvn = nullptr); // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(failing_internal() || !has_exceptions(),
           "unless compilation failed, user must call transfer_exceptions_into_jvms");
#if 0
    // During incremental inlining, the Node_Array of the C->for_igvn() worklist and the IGVN
    // worklist are shared but the _in_worklist VectorSet is not. To avoid inconsistencies,
    // we should not add nodes to the _for_igvn worklist when using IGVN for the GraphKit.
    assert((_gvn.is_IterGVN() == nullptr) || (_gvn.C->for_igvn()->size() == _worklist_size),
           "GraphKit should not modify _for_igvn worklist after parsing");
#endif
  }
#endif

  virtual Parse*          is_Parse() const { return nullptr; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return nullptr; }

  ciEnv*    env()               const { return _env; }
  PhaseGVN& gvn()               const { return _gvn; }
  void*     barrier_set_state() const { return C->barrier_set_state(); }

  void record_for_igvn(Node* n) const { _gvn.record_for_igvn(n); }
  void remove_for_igvn(Node* n) const { C->remove_for_igvn(n); }

  // Handy well-known nodes:
  Node*     null() const { return zerocon(T_OBJECT); }
  Node*     top()  const { return C->top(); }
  RootNode* root() const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)   const { return _gvn.intcon(con); }
  Node* longcon(jlong con) const { return _gvn.longcon(con); }
  Node* integercon(jlong con, BasicType bt) const {
    if (bt == T_INT) {
      return intcon(checked_cast<jint>(con));
    }
    assert(bt == T_LONG, "basic type not an int or long");
    return longcon(con);
  }
  Node* makecon(const Type* t) const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)  const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  jint find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by the StartNode::enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.
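  //
  // An illustrative sketch of that layout in terms of the JVMState offset
  // accessors (approximate; class JVMState remains the authoritative reference):
  //
  //   [0 .. TypeFunc::Parms)                      fixed edges: control, i_o, memory, frameptr, returnadr
  //   [jvms->locoff() .. jvms->stkoff())          locals (max_locals() slots)
  //   [jvms->stkoff() .. jvms->stkoff() + sp())   live expression stack slots
  //   [jvms->monoff() .. jvms->endoff())          monitors and other debug info, if any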

  SafePointNode*  map()    const { return _map; }
  bool            has_exceptions() const { return _exceptions != nullptr; }
  JVMState*       jvms()   const { return map_not_null()->_jvms; }
  int             sp()     const { return _sp; }
  int             bci()    const { return _bci; }
  Bytecodes::Code java_bc() const;
  ciMethod*       method() const { return _method; }

  void set_jvms(JVMState* jvms) {
    set_map(jvms->map());
    assert(jvms == this->jvms(), "sanity");
    _sp = jvms->sp();
    _bci = jvms->bci();
    _method = jvms->has_method() ? jvms->method() : nullptr;
  }
  void set_map(SafePointNode* m) { _map = m; DEBUG_ONLY(verify_map()); }
  void set_sp(int sp) { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)    { set_sp(sp() + i); }
  void dec_sp(int i)    { set_sp(sp() - i); }
  void set_bci(int bci) { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state. (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Reverses the work done by clone_map(). Should only be used when the node returned by
  // clone_map() is ultimately not used. Calling Node::destruct directly in the previously
  // mentioned circumstance instead of this method may result in use-after-free.
  void destruct_map_clone(SafePointNode* sfp);

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }
  bool failing_internal() const { return C->failing_internal(); }

  // Set _map to null, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(nullptr); return m; }

  // Stop, but first smash the map's inputs to null, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is null, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_exception_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Report whether an exception oop has been saved in the given map (without removing it).
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != nullptr) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(nullptr);
      DEBUG_ONLY(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    DEBUG_ONLY(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == nullptr) return nullptr;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != nullptr) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state. If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop. (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, returns the combined states.
  JVMState* transfer_exceptions_into_jvms();
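
  // An illustrative sketch of the usual bookkeeping around a nested JVM state
  // (e.g. after generating an inlined call); `callee_kit` and `new_jvms` are
  // placeholder names, not part of this interface:
  //
  //   JVMState* new_jvms = callee_kit.transfer_exceptions_into_jvms();
  //   ...
  //   add_exception_states_from(new_jvms);  // adopt the callee's exceptions
  //   set_jvms(new_jvms);                   // and continue from its state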

  // Helper to throw a built-in exception.
  // The JVMS must allow the bytecode to be re-executed via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason);
  void builtin_throw(Deoptimization::DeoptReason reason,
                     ciInstance* exception_object,
                     bool allow_too_many_traps);
  bool builtin_throw_too_many_traps(Deoptimization::DeoptReason reason,
                                    ciInstance* exception_object);
 private:
  bool is_builtin_throw_hot(Deoptimization::DeoptReason reason);
  ciInstance* builtin_throw_exception(Deoptimization::DeoptReason reason) const;

 public:

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize. Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
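
  // A minimal usage sketch (illustrative; `obj`, `ary`, `offset_in_bytes`
  // and `scaled_index` are placeholders for nodes/values already in hand):
  //
  //   Node* field_adr = basic_plus_adr(obj, offset_in_bytes);   // fixed offset from obj
  //   Node* elem_adr  = basic_plus_adr(ary, ary, scaled_index); // variable offset, base == ptr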


  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new IfFalseNode(iff));     }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new DivINode(ctl, l, r));  }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new XorINode(l, r));       }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new MinINode(l, r));       }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new URShiftINode(l, r));   }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new AddPNode(b, a, o));    }

  // Convert between int and long, and size_t.
  // (See also macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvI2UL(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
  // Cast an array allocation's length as narrow as possible.
  // If replace_length_in_map is true, replace the length with a CastIINode in the map.
  // This method is invoked after creating/moving an AllocateArrayNode, or from load_array_length.
  Node* array_ideal_length(AllocateArrayNode* alloc,
                           const TypeOopPtr* oop_type,
                           bool replace_length_in_map);


  // Helper function to do a null pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false,
                          Node* *null_control = nullptr,
                          bool speculative = false,
                          bool null_marker_check = false);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, false, nullptr, !_gvn.type(value)->speculative_maybe_null());
  }
  Node* null_check_receiver() {
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true, nullptr, _gvn.type(value)->speculative_always_null());
  }

  // Check if value is null and abort if it is
  Node* must_be_not_null(Node* value, bool do_replace_in_map);

  // Null check oop. Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map)
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false,
                       bool safe_for_replace = false,
                       bool speculative = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);

  void guard_klass_being_initialized(Node* klass);
  void guard_init_thread(Node* klass);

  void clinit_barrier(ciInstanceKlass* ik, ciMethod* context);

  // Check for unique class for receiver at call
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 &&  // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return nullptr;
  }

  // record type from profiling with the type system
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();
  void record_profiled_return_for_speculation();
  Node* record_profiled_receiver_for_speculation(Node* n);

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     const TypeKlassPtr* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);

  // Cast obj to type and emit guard unless we had too many traps here already
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);

  // If a larval object appears multiple times in the JVMS and we encounter a loop, they will
  // become multiple Phis and we cannot change all of them to non-larval when we invoke the
  // constructor on one. The other case is that we don't know whether a parameter of an OSR
  // compilation is larval or not. If such a maybe-larval object is passed into an operation that
  // does not permit larval objects, we can be sure that it is not larval and scalarize it if it
  // is a value object.
  Node* cast_to_non_larval(Node* obj);

  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  Node* maybe_narrow_object_type(Node* obj, ciKlass* type);

  void  push(Node* n)     { map_not_null(); _map->set_stack(_map->_jvms, _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(_map->_jvms, --_sp); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(local(i + 0));  // the real value
    assert(local(i + 1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i + 0, lval);
    set_local(i + 1, top());
  }
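
  // Illustrative sketch of the two-slot convention for longs/doubles
  // (the `lval` name is a placeholder): the value occupies one slot and
  // top() fills the second, dummy slot.
  //
  //   push_pair(lval);       // pushes lval, then a top() placeholder
  //   ...
  //   Node* v = pop_pair();  // pops (and checks) the placeholder, returns lval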

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1) push(n);       // T_INT, ...
    else if (n_size == 2) push_pair(n);  // T_DOUBLE, T_LONG
    else                  { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1) return pop();
    else if (n_size == 2) return pop_pair();
    else                  return nullptr;
  }

  Node* control()             const { return map_not_null()->control(); }
  Node* i_o()                 const { return map_not_null()->i_o(); }
  Node* returnadr()           const { return map_not_null()->returnadr(); }
  Node* frameptr()            const { return map_not_null()->frameptr(); }
  Node* local(uint idx)       const { map_not_null(); return _map->local(_map->_jvms, idx); }
  Node* stack(uint idx)       const { map_not_null(); return _map->stack(_map->_jvms, idx); }
  Node* argument(uint idx)    const { map_not_null(); return _map->argument(_map->_jvms, idx); }
  Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control(Node* c)            { map_not_null()->set_control(c); }
  void set_i_o(Node* c)                { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)    { map_not_null(); _map->set_local(_map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)    { map_not_null(); _map->set_stack(_map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c) { map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)     { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr* tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr)         { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx)    { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr* tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr)         { set_memory(c, _gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note: require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false,
                        int barrier_data = 0);
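
  // A minimal sketch of a raw access built with the two helpers above
  // (illustrative only; `obj` and `off` are placeholders, and most clients
  // should prefer the decorated access_* routines below so that GC barriers
  // are applied):
  //
  //   Node* adr = basic_plus_adr(obj, off);
  //   Node* val = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
  //   store_to_memory(control(), adr, val, T_INT, MemNode::unordered);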

  // Perform decorated accesses

  Node* access_store_at(Node* obj,   // containing obj
                        Node* adr,   // actual address to store val at
                        const TypePtr* adr_type,
                        Node* val,
                        const Type* val_type,
                        BasicType bt,
                        DecoratorSet decorators,
                        bool safe_for_replace = true,
                        const InlineTypeNode* vt = nullptr);

  Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val at
                       const TypePtr* adr_type,
                       const Type* val_type,
                       BasicType bt,
                       DecoratorSet decorators,
                       Node* ctl = nullptr);

  Node* access_load(Node* adr,   // actual address to load val at
                    const Type* val_type,
                    BasicType bt,
                    DecoratorSet decorators);

  Node* access_atomic_cmpxchg_val_at(Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* expected_val,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators);

  Node* access_atomic_cmpxchg_bool_at(Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      Node* expected_val,
                                      Node* new_val,
                                      const Type* value_type,
                                      BasicType bt,
                                      DecoratorSet decorators);

  Node* access_atomic_xchg_at(Node* obj,
                              Node* adr,
                              const TypePtr* adr_type,
                              int alias_idx,
                              Node* new_val,
                              const Type* value_type,
                              BasicType bt,
                              DecoratorSet decorators);

  Node* access_atomic_add_at(Node* obj,
                             Node* adr,
                             const TypePtr* adr_type,
                             int alias_idx,
                             Node* new_val,
                             const Type* value_type,
                             BasicType bt,
                             DecoratorSet decorators);

  void access_clone(Node* src, Node* dst, Node* size, bool is_array);
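
  // A minimal sketch of a decorated, GC-barrier-aware load/store pair
  // (illustrative only; `obj`, `adr` and the decorator combination are
  // placeholders chosen for a plain in-heap int field):
  //
  //   const TypePtr* adr_type = _gvn.type(adr)->is_ptr();
  //   Node* v = access_load_at(obj, adr, adr_type, TypeInt::INT, T_INT,
  //                            IN_HEAP | MO_UNORDERED);
  //   access_store_at(obj, adr, adr_type, v, TypeInt::INT, T_INT,
  //                   IN_HEAP | MO_UNORDERED);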

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = nullptr,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = nullptr);
  Node* cast_to_flat_array(Node* array, ciInlineKlass* elem_vk, bool is_null_free, bool is_not_null_free, bool is_atomic);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl);

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char* name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    // The call site signature can be different from the actual method being called (i.e. _linkTo* sites).
    // Always use the call site signature.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline = false);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false, bool deoptimize = false);
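
  // The typical sequence for emitting a Java call with these helpers
  // (a sketch; constructing the CallJavaNode itself is omitted and the
  // names below are placeholders):
  //
  //   CallJavaNode* call = ...;                 // e.g. a CallStaticJavaNode
  //   set_arguments_for_java_call(call);
  //   set_edges_for_java_call(call);
  //   Node* result = set_results_for_java_call(call);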

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, nullptr, nullptr);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = nullptr);

  // Replace the call with the current state of the kit. Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false, bool do_asserts = true);

  // helper functions for statistics
  void increment_counter(address counter_addr); // increment a debug counter
  void increment_counter(Node* counter_addr);   // increment a debug counter

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  Node* uncommon_trap(int trap_request,
                      ciKlass* klass = nullptr, const char* reason_string = nullptr,
                      bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  Node* uncommon_trap(Deoptimization::DeoptReason reason,
                      Deoptimization::DeoptAction action,
                      ciKlass* klass = nullptr, const char* reason_string = nullptr,
                      bool must_throw = false, bool keep_exact_action = false) {
    return uncommon_trap(Deoptimization::make_trap_request(reason, action),
                         klass, reason_string, must_throw, keep_exact_action);
  }

  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
  Node* uncommon_trap_exact(Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action,
                            ciKlass* klass = nullptr, const char* reason_string = nullptr,
                            bool must_throw = false) {
    return uncommon_trap(Deoptimization::make_trap_request(reason, action),
                         klass, reason_string, must_throw, /*keep_exact_action=*/true);
  }
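
  // Example (a sketch; the reason/action pair is only illustrative):
  // deoptimize and switch back to the interpreter when a path assumed to be
  // unreachable is actually taken.
  //
  //   uncommon_trap(Deoptimization::Reason_unreached,
  //                 Deoptimization::Action_reinterpret);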

  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  bool too_many_traps_or_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_traps_or_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

 public:
  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // null if no memory effects
                          Node* parm0 = nullptr, Node* parm1 = nullptr,
                          Node* parm2 = nullptr, Node* parm3 = nullptr,
                          Node* parm4 = nullptr, Node* parm5 = nullptr,
                          Node* parm6 = nullptr, Node* parm7 = nullptr);

  Node* sign_extend_byte(Node* in);
  Node* sign_extend_short(Node* in);

  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,        // CallLeafNoFPNode
    RC_NO_IO = 2,        // do not hook IO edges
    RC_NO_LEAF = 4,      // CallStaticJavaNode
    RC_MUST_THROW = 8,   // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,  // input memory is same as output
    RC_UNCOMMON = 32,    // freq. expected to be like uncommon trap
    RC_VECTOR = 64,      // CallLeafVectorNode
    RC_PURE = 128,       // CallLeaf is pure
    RC_LEAF = 0          // null value: no flags set
  };
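
  // Example flag combination (a sketch; the TypeFunc, entry point and
  // argument below are placeholders, not real runtime entries):
  //
  //   make_runtime_call(RC_LEAF | RC_NO_FP,
  //                     some_call_type, some_entry_point, "some_leaf_call",
  //                     TypeRawPtr::BOTTOM, parm0);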

  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = nullptr);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = nullptr);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type* result_type, Node* null_result,
                      IfNode* fast_test, Node* fast_result,
                      address slow_call, const TypeFunc* slow_call_type, Node* slow_arg,
                      Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom. Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node* subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom. Used by both the check-cast bytecode
  // and the array-store bytecode
  Node* gen_checkcast(Node* subobj, Node* superkls, Node** failure_control = nullptr, bool null_free = false, bool maybe_larval = false);

  // Inline types
  Node* mark_word_test(Node* obj, uintptr_t mask_val, bool eq, bool check_lock = true);
  Node* inline_type_test(Node* obj, bool is_inline = true);
  Node* flat_array_test(Node* array_or_klass, bool flat = true);
  Node* null_free_array_test(Node* array, bool null_free = true);
  Node* null_free_atomic_array_test(Node* array, ciInlineKlass* vk);
  Node* inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace = false);

  Node* gen_subtype_check(Node* obj, Node* superklass);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node** casted_receiver);
  Node* type_check(Node* recv_klass, const TypeKlassPtr* tklass, float prob);

  // Inexact type check used for predicted calls.
  Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                               Node** casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception = false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = nullptr,
                     Node** return_size_val = nullptr,
                     bool deoptimize_on_exception = false,
                     InlineTypeNode* inline_type_node = nullptr);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node** return_size_val = nullptr,
                  bool deoptimize_on_exception = false,
                  Node* init_val = nullptr);

  // java.lang.String helpers
  Node* load_String_length(Node* str, bool set_ctrl);
  Node* load_String_value(Node* str, bool set_ctrl);
  Node* load_String_coder(Node* str, bool set_ctrl);
  void store_String_value(Node* str, Node* value);
  void store_String_coder(Node* str, Node* value);
  Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
  Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
  void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
  void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // new IfNode
    _gvn.set_type(iff, iff->Value(&_gvn));          // Value may be known at parse time
    // Place 'if' on the worklist if it will be in the graph
    if (!tst->is_Con()) record_for_igvn(iff);       // range-check and null-check removal happen later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // new IfNode
    _gvn.transform(iff);                            // Value may be known at parse time
    // Place 'if' on the worklist if it will be in the graph
    if (!tst->is_Con()) record_for_igvn(iff);       // range-check and null-check removal happen later
    return iff;
  }
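
  // A minimal control-flow sketch built from the helpers above
  // (illustrative; `value` stands for some int-typed node already in hand):
  //
  //   Node* cmp = CmpI(value, intcon(0));
  //   Node* bol = Bool(cmp, BoolTest::ne);
  //   IfNode* iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
  //   Node* taken     = IfTrue(iff);
  //   Node* not_taken = IfFalse(iff);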

  void add_parse_predicates(int nargs = 0);
  void add_parse_predicate(Deoptimization::DeoptReason reason, int nargs);

  Node* make_constant_from_field(ciField* field, Node* obj);
  Node* load_mirror_from_klass(Node* klass);

  // Vector API support (implemented in vectorIntrinsics.cpp)
  Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false);
  Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem);
  Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};
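
// Typical use (a sketch; `bol` stands for a BoolNode already computed by the
// enclosing GraphKit code, and the trap reason/action are only illustrative):
//
//   { BuildCutout unless(this, bol, PROB_MAX);
//     uncommon_trap(Deoptimization::Reason_intrinsic,
//                   Deoptimization::Action_make_not_entrant);
//   }
//   // control continues here along the path where the test succeeded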

// Helper class to preserve the original _reexecute bit and _sp and restore
// them back
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                _kit;
  uint                     _sp;
  JVMState::ReexecuteState _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_OPTO_GRAPHKIT_HPP