/*
 * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_GRAPHKIT_HPP
#define SHARE_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class BarrierSetC2;
class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
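//
// A minimal illustrative use (operand names are placeholders; a real kit is
// positioned at a particular bci by its JVMState):
//   Node* sum = kit.AddI(kit.argument(0), kit.intcon(1));
//   kit.push(sum);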
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*          _env;        // Compilation environment
  PhaseGVN&       _gvn;        // Some optimizations while parsing
  SafePointNode*  _map;        // Parser map from JVM to Nodes
  SafePointNode*  _exceptions; // Parser map(s) for exception state(s)
  int             _bci;        // JVM Bytecode Pointer
  ciMethod*       _method;     // JVM Current Method
  BarrierSetC2*   _barrier_set;

 private:
  int             _sp;         // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode* map_not_null() const {
    assert(_map != nullptr, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();               // empty constructor
  GraphKit(JVMState* jvms); // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(failing_internal() || !has_exceptions(),
           "unless compilation failed, user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse()          const { return nullptr; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return nullptr; }

  ciEnv*    env()               const { return _env; }
  PhaseGVN& gvn()               const { return _gvn; }
  void*     barrier_set_state() const { return C->barrier_set_state(); }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); } // delegate to Compile
  void remove_for_igvn(Node* n) const { C->remove_for_igvn(n); }

  // Handy well-known nodes:
  Node*     null() const { return zerocon(T_OBJECT); }
  Node*     top()  const { return C->top(); }
  RootNode* root() const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)   const { return _gvn.intcon(con); }
  Node* longcon(jlong con) const { return _gvn.longcon(con); }
  Node* integercon(jlong con, BasicType bt) const {
    if (bt == T_INT) {
      return intcon(checked_cast<jint>(con));
    }
    assert(bt == T_LONG, "basic type not an int or long");
    return longcon(con);
  }
  Node* makecon(const Type* t)  const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  jint find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by the StartNode::enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*  map()            const { return _map; }
  bool            has_exceptions() const { return _exceptions != nullptr; }
  JVMState*       jvms()           const { return map_not_null()->_jvms; }
  int             sp()             const { return _sp; }
  int             bci()            const { return _bci; }
  Bytecodes::Code java_bc()        const;
  ciMethod*       method()         const { return _method; }

  void set_jvms(JVMState* jvms) { set_map(jvms->map());
                                  assert(jvms == this->jvms(), "sanity");
                                  _sp = jvms->sp();
                                  _bci = jvms->bci();
                                  _method = jvms->has_method() ? jvms->method() : nullptr; }
  void set_map(SafePointNode* m) { _map = m; DEBUG_ONLY(verify_map()); }
  void set_sp(int sp)            { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)    { set_sp(sp() + i); }
  void dec_sp(int i)    { set_sp(sp() - i); }
  void set_bci(int bci) { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state. (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Reverses the work done by clone_map(). Should only be used when the node returned by
  // clone_map() is ultimately not used. Calling Node::destruct directly in the previously
  // mentioned circumstance instead of this method may result in use-after-free.
  void destruct_map_clone(SafePointNode* sfp);

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }
  bool failing_internal() const { return C->failing_internal(); }

  // Set _map to null, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(nullptr); return m; }

  // Stop, but first smash the map's inputs to null, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is null, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_exception_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Tell whether a saved exception oop is present in the given map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1); // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != nullptr) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(nullptr);
      DEBUG_ONLY(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    DEBUG_ONLY(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == nullptr) return nullptr;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != nullptr) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state. If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop. (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, and return the combined states.
  JVMState* transfer_exceptions_into_jvms();
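
  // A minimal illustrative sketch of the usual plumbing when an operation may
  // throw (the surrounding control flow and the ex_oop value are placeholders):
  //   SafePointNode* ex_map = make_exception_state(ex_oop); // current state + ex_oop
  //   add_exception_state(ex_map);                          // queue it for the caller
  //   ...
  //   JVMState* ex_jvms = transfer_exceptions_into_jvms();  // hand all of them back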

  // Helper to throw a built-in exception.
  // The JVMS must allow the bytecode to be re-executed via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason);
  void builtin_throw(Deoptimization::DeoptReason reason,
                     ciInstance* exception_object,
                     bool allow_too_many_traps);
  bool builtin_throw_too_many_traps(Deoptimization::DeoptReason reason,
                                    ciInstance* exception_object);
 private:
  bool is_builtin_throw_hot(Deoptimization::DeoptReason reason);
  ciInstance* builtin_throw_exception(Deoptimization::DeoptReason reason) const;

 public:

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize. Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);

  Node* off_heap_plus_addr(Node* ptr, intptr_t offset) {
    return basic_plus_adr(top(), ptr, MakeConX(offset));
  }

  Node* off_heap_plus_addr(Node* ptr, Node* offset) {
    return basic_plus_adr(top(), ptr, offset);
  }
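
  // Illustrative only: computing the address of a field at a known byte offset
  // (obj and offset_in_bytes are placeholder names, not part of this API):
  //   Node* adr = basic_plus_adr(obj, offset_in_bytes);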

  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new IfFalseNode(iff));     }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new DivINode(ctl, l, r));  }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new XorINode(l, r));       }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new MinINode(l, r));       }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new URShiftINode(l, r));   }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(AddPNode::make_with_base(b, a, o)); }
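
  // Each shortcut hands the new node to _gvn.transform(), so constant operands
  // fold immediately. Illustrative composition (idx is a placeholder Node*):
  //   Node* byte_index = LShiftI(idx, intcon(2)); // idx * 4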

  // Convert between int and long, and size_t.
  // (See also macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvI2UL(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
  // Cast array allocation's length as narrow as possible.
  // If replace_length_in_map is true, replace length with CastIINode in map.
  // This method is invoked after creating/moving an AllocateArrayNode, or in load_array_length.
  Node* array_ideal_length(AllocateArrayNode* alloc,
                           const TypeOopPtr* oop_type,
                           bool replace_length_in_map);


  // Helper function to do a null pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false,
                          Node* *null_control = nullptr,
                          bool speculative = false);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, false, nullptr, !_gvn.type(value)->speculative_maybe_null());
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true, nullptr, _gvn.type(value)->speculative_always_null());
  }

  // Check if value is null and abort if it is
  Node* must_be_not_null(Node* value, bool do_replace_in_map);

  // Null check oop. Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map)
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false,
                       bool safe_for_replace = false,
                       bool speculative = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);

  void guard_klass_being_initialized(Node* klass);
  void guard_init_thread(Node* klass);

  void clinit_barrier(ciInstanceKlass* ik, ciMethod* context);

  // Check for unique class for receiver at call
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 && // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return nullptr;
  }

  // record type from profiling with the type system
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();
  void record_profiled_return_for_speculation();
  Node* record_profiled_receiver_for_speculation(Node* n);

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     const TypeKlassPtr* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);

  // Cast obj to type and emit guard unless we had too many traps here already
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  Node* maybe_narrow_object_type(Node* obj, ciKlass* type);

  void  push(Node* n)     { map_not_null(); _map->set_stack(_map->_jvms, _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(_map->_jvms, --_sp); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top()); // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(local(i+0)); // the real value
    assert(local(i+1) == top(), "");
    push(top()); // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1) push(n);       // T_INT, ...
    else if (n_size == 2) push_pair(n);  // T_DOUBLE, T_LONG
    else                  { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1) return pop();
    else if (n_size == 2) return pop_pair();
    else                  return nullptr;
  }
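
  // Two-word values occupy a value/half pair on the expression stack; the
  // second slot is always the top() placeholder. Illustrative only:
  //   push_pair(longcon(0));  // pushes the long value and a top() placeholder
  //   Node* l = pop_pair();   // pops both and returns the long value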

  Node* control()             const { return map_not_null()->control(); }
  Node* i_o()                 const { return map_not_null()->i_o(); }
  Node* returnadr()           const { return map_not_null()->returnadr(); }
  Node* frameptr()            const { return map_not_null()->frameptr(); }
  Node* local(uint idx)       const { map_not_null(); return _map->local(_map->_jvms, idx); }
  Node* stack(uint idx)       const { map_not_null(); return _map->stack(_map->_jvms, idx); }
  Node* argument(uint idx)    const { map_not_null(); return _map->argument(_map->_jvms, idx); }
  Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control(Node* c)            { map_not_null()->set_control(c); }
  void set_i_o(Node* c)                { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)    { map_not_null(); _map->set_local(_map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)    { map_not_null(); _map->set_stack(_map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c) { map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)     { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr* tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr)         { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx)    { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr* tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr)         { set_memory(c, _gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note: require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  // Return the new StoreXNode
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false,
                        int barrier_data = 0);
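
  // Illustrative use of the above (the object, offset, and types are
  // placeholders, not a prescribed calling convention):
  //   Node* adr = basic_plus_adr(obj, offset_in_bytes);
  //   Node* val = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
  //   store_to_memory(control(), adr, intcon(0), T_INT, MemNode::unordered);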

  // Perform decorated accesses

  Node* access_store_at(Node* obj,   // containing obj
                        Node* adr,   // actual address to store val at
                        const TypePtr* adr_type,
                        Node* val,
                        const Type* val_type,
                        BasicType bt,
                        DecoratorSet decorators);

  Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val at
                       const TypePtr* adr_type,
                       const Type* val_type,
                       BasicType bt,
                       DecoratorSet decorators);

  Node* access_load(Node* adr,   // actual address to load val at
                    const Type* val_type,
                    BasicType bt,
                    DecoratorSet decorators);

  Node* access_atomic_cmpxchg_val_at(Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* expected_val,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators);

  Node* access_atomic_cmpxchg_bool_at(Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      Node* expected_val,
                                      Node* new_val,
                                      const Type* value_type,
                                      BasicType bt,
                                      DecoratorSet decorators);

  Node* access_atomic_xchg_at(Node* obj,
                              Node* adr,
                              const TypePtr* adr_type,
                              int alias_idx,
                              Node* new_val,
                              const Type* value_type,
                              BasicType bt,
                              DecoratorSet decorators);

  Node* access_atomic_add_at(Node* obj,
                             Node* adr,
                             const TypePtr* adr_type,
                             int alias_idx,
                             Node* new_val,
                             const Type* value_type,
                             BasicType bt,
                             DecoratorSet decorators);

  void access_clone(Node* src, Node* dst, Node* size, bool is_array);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = nullptr,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = nullptr);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl);

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char* name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
    // Use callsite signature always.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false, bool deoptimize = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, nullptr, nullptr);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = nullptr);

  // Replace the call with the current state of the kit. Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false, bool do_asserts = true);

  // helper functions for statistics
  void increment_counter(address counter_addr); // increment a debug counter
  void increment_counter(Node* counter_addr);   // increment a debug counter

  void halt(Node* ctrl, Node* frameptr, const char* reason, bool generate_code_in_product = true);

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  Node* uncommon_trap(int trap_request,
                      ciKlass* klass = nullptr, const char* reason_string = nullptr,
                      bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  Node* uncommon_trap(Deoptimization::DeoptReason reason,
                      Deoptimization::DeoptAction action,
                      ciKlass* klass = nullptr, const char* reason_string = nullptr,
                      bool must_throw = false, bool keep_exact_action = false) {
    return uncommon_trap(Deoptimization::make_trap_request(reason, action),
                         klass, reason_string, must_throw, keep_exact_action);
  }

  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
  Node* uncommon_trap_exact(Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action,
                            ciKlass* klass = nullptr, const char* reason_string = nullptr,
                            bool must_throw = false) {
    return uncommon_trap(Deoptimization::make_trap_request(reason, action),
                         klass, reason_string, must_throw, /*keep_exact_action=*/true);
  }

  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  bool too_many_traps_or_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_traps_or_recompiles(method(), bci(), reason);
  }
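
  // A common illustrative pattern: only emit a speculative uncommon trap if
  // this site has not already trapped too often for the given reason:
  //   if (!too_many_traps(Deoptimization::Reason_class_check)) {
  //     uncommon_trap(Deoptimization::Reason_class_check,
  //                   Deoptimization::Action_maybe_recompile);
  //   }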

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

 public:
  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // null if no memory effects
                          Node* parm0 = nullptr, Node* parm1 = nullptr,
                          Node* parm2 = nullptr, Node* parm3 = nullptr,
                          Node* parm4 = nullptr, Node* parm5 = nullptr,
                          Node* parm6 = nullptr, Node* parm7 = nullptr);

  Node* sign_extend_byte(Node* in);
  Node* sign_extend_short(Node* in);

  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,       // CallLeafNoFPNode
    RC_NO_IO = 2,       // do not hook IO edges
    RC_NO_LEAF = 4,     // CallStaticJavaNode
    RC_MUST_THROW = 8,  // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16, // input memory is same as output
    RC_UNCOMMON = 32,   // freq. expected to be like uncommon trap
    RC_VECTOR = 64,     // CallLeafVectorNode
    RC_PURE = 128,      // CallLeaf is pure
    RC_LEAF = 0         // null value: no flags set
  };
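
  // Illustrative only: a leaf runtime call with no IO effects. The TypeFunc
  // accessor, entry address, name, and argument below are hypothetical placeholders:
  //   make_runtime_call(RC_LEAF | RC_NO_IO,
  //                     some_call_Type(), some_entry_address,
  //                     "some_leaf_call", TypePtr::BOTTOM, parm0);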

  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = nullptr);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = nullptr);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type* result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc* slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom. Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node* subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom. Used by both the check-cast bytecode
  // and the array-store bytecode
  Node* gen_checkcast(Node* subobj, Node* superkls,
                      Node* *failure_control = nullptr);

  Node* gen_subtype_check(Node* obj, Node* superklass);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // Inexact type check used for predicted calls.
  Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                               Node** casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception = false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = nullptr,
                     Node* *return_size_val = nullptr,
                     bool deoptimize_on_exception = false);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = nullptr,
                  bool deoptimize_on_exception = false);

  // java.lang.String helpers
  Node* load_String_length(Node* str, bool set_ctrl);
  Node* load_String_value(Node* str, bool set_ctrl);
  Node* load_String_coder(Node* str, bool set_ctrl);
  void store_String_value(Node* str, Node* value);
  void store_String_coder(Node* str, Node* value);
  Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
  Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
  void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
  void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn));          // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con()) record_for_igvn(iff);       // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.transform(iff);                            // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con()) record_for_igvn(iff);       // Range-check and Null-check removal is later
    return iff;
  }
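
  // Illustrative shape of a simple two-way branch built from these helpers
  // (the operands and probability are placeholders):
  //   Node* cmp = CmpI(a, b);
  //   Node* tst = Bool(cmp, BoolTest::lt);
  //   IfNode* iff = create_and_map_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
  //   Node* taken     = IfTrue(iff);
  //   Node* not_taken = IfFalse(iff);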

  void add_parse_predicates(int nargs = 0);
  void add_parse_predicate(Deoptimization::DeoptReason reason, int nargs);

  Node* make_constant_from_field(ciField* field, Node* obj);

  // Vector API support (implemented in vectorIntrinsics.cpp)
  Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false);
  Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem);
  Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);

  // Helper class to support reverting to a previous parsing state.
  // When an intrinsic makes changes before bailing out, it is necessary to restore the graph
  // as it was. See JDK-8359344 for an example of what can go wrong. It is also not always
  // possible to bail out before making changes, because the decision to bail out might depend
  // on new nodes (their types, for instance).
  //
  // So, if an intrinsic might run into this situation, it must start by saving the state in a
  // SavedState by constructing one; the state is then restored when the SavedState is destroyed.
  // If the intrinsic does not bail out, it must call discard() to prevent restoring the old state.
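  //
  // A typical illustrative shape inside an intrinsic (the bail-out condition
  // is a placeholder):
  //   SavedState saved(this);
  //   ... build speculative nodes, possibly deciding to bail out ...
  //   if (bailing_out) return false;  // destructor restores the saved state
  //   saved.discard();                // commit: keep the new state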
  class SavedState : public StackObj {
    GraphKit* _kit;
    int _sp;
    JVMState* _jvms;
    SafePointNode* _map;
    Unique_Node_List _ctrl_succ;
    bool _discarded;

   public:
    SavedState(GraphKit*);
    ~SavedState();
    void discard();
  };
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon de-
// struction. Typical use:

// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block; // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};

// Helper class to preserve the original _reexecute bit and _sp and restore
// them back
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                _kit;
  uint                     _sp;
  JVMState::ReexecuteState _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_OPTO_GRAPHKIT_HPP