/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciInstanceKlass.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================

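// Parse a field access bytecode (getfield, putfield, getstatic, or putstatic).
// is_get distinguishes loads from stores, is_field distinguishes instance
// fields from static fields; a mismatch with the resolved field traps to the
// interpreter, which throws IncompatibleClassChangeError.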
void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  // Deoptimize on putfield writes to call site target field outside of CallSite ctor.
  ciInstanceKlass* field_holder = field->holder();
  if (!is_get && field->is_call_site_target() &&
      !(method()->holder() == field_holder && method()->is_object_constructor())) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  nullptr, "put to call site target field");
    return;
  }

  if (C->needs_clinit_barrier(field, method())) {
    clinit_barrier(field_holder, method());
    if (stopped())  return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detection of a null exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
      if (stopped()) {
        return;
      }
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}

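// Load the field value and push it onto the expression stack. For an instance
// field the receiver (already null-checked by the caller) is popped here; for
// a static field, obj is a constant holding the field holder's java mirror.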
void Parse::do_get_xxx(Node* obj, ciField* field) {
  obj = cast_to_non_larval(obj);
  BasicType bt = field->layout_type();
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant() && !field->is_flat() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a
      // non-null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != nullptr) {
      if (!field->is_static()) {
        pop();
      }
      push_node(field->layout_type(), con);
      return;
    }
  }

  if (obj->is_InlineType()) {
    assert(!field->is_static(), "must not be a static field");
    InlineTypeNode* vt = obj->as_InlineType();
    Node* value = vt->field_value_by_offset(field->offset_in_bytes(), false);
    if (value->is_InlineType()) {
      value = value->as_InlineType()->adjust_scalarization_depth(this);
    }
    pop();
    push_node(field->layout_type(), value);
    return;
  }

  ciType* field_klass = field->type();
  field_klass = improve_abstract_inline_type_klass(field_klass);
  int offset = field->offset_in_bytes();
  bool must_assert_null = false;
  Node* adr = basic_plus_adr(obj, obj, offset);

  Node* ld = nullptr;
  if (field->is_null_free() && field_klass->as_inline_klass()->is_empty()) {
    // Loading from a field of an empty inline type. Just return the default instance.
    ld = InlineTypeNode::make_all_zero(_gvn, field_klass->as_inline_klass());
  } else if (field->is_flat()) {
    // Loading from a flat inline type field.
    ciInlineKlass* vk = field->type()->as_inline_klass();
    bool is_immutable = field->is_final() && field->is_strict();
    bool atomic = vk->must_be_atomic() || !field->is_null_free();
    ld = InlineTypeNode::make_from_flat(this, field_klass->as_inline_klass(), obj, adr, atomic, is_immutable, field->is_null_free(), IN_HEAP | MO_UNORDERED);
  } else {
    // Build the resultant type of the load
    const Type* type;
    if (is_reference_type(bt)) {
      if (!field_klass->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
        must_assert_null = true;
      } else if (field->is_static_constant()) {
        // This can happen if the constant oop is non-perm.
        ciObject* con = field->constant_value().as_object();
        // Do not "join" in the previous type; it doesn't add value,
        // and may yield a vacuous result if the field is of interface type.
        if (con->is_null_object()) {
          type = TypePtr::NULL_PTR;
        } else {
          type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
        }
        assert(type != nullptr, "field singleton type must be consistent");
      } else {
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
        if (field->is_null_free()) {
          type = type->join_speculative(TypePtr::NOTNULL);
        }
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }

    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    decorators |= field->is_volatile() ? MO_SEQ_CST : MO_UNORDERED;
    ld = access_load_at(obj, adr, adr_type, type, bt, decorators);
    if (field_klass->is_inlinetype()) {
      // Load a non-flattened inline type from memory
      ld = InlineTypeNode::make_from_oop(this, ld, field_klass->as_inline_klass());
    }
  }

  // Adjust Java stack
  if (!field->is_static()) {
    pop();
  }
  if (type2size[bt] == 1) {
    push(ld);
  } else {
    push_pair(ld);
  }

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != nullptr) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field_klass));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

// If the field klass is an abstract value klass (for which we do not know the layout yet), it could have a unique
// concrete subklass for which we have a fixed layout. This allows us to use InlineTypeNodes instead.
ciType* Parse::improve_abstract_inline_type_klass(ciType* field_klass) {
  Dependencies* dependencies = C->dependencies();
  if (UseUniqueSubclasses && dependencies != nullptr && field_klass->is_instance_klass()) {
    ciInstanceKlass* instance_klass = field_klass->as_instance_klass();
    if (instance_klass->is_loaded() && instance_klass->is_abstract_value_klass()) {
      ciInstanceKlass* sub_klass = instance_klass->unique_concrete_subklass();
      if (sub_klass != nullptr && sub_klass != field_klass) {
        field_klass = sub_klass;
        dependencies->assert_abstract_with_unique_concrete_subtype(instance_klass, sub_klass);
      }
    }
  }
  return field_klass;
}

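// Store a value into the field. The value to store is popped from the
// expression stack here; the receiver (for instance fields) is popped by the
// caller after the store has been emitted.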
void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  int offset = field->offset_in_bytes();

  BasicType bt = field->layout_type();
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  if (field->is_null_free()) {
    PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);
    inc_sp(1);
    val = null_check(val);
    if (stopped()) {
      return;
    }
  }

  val = cast_to_non_larval(val);
  Node* adr = basic_plus_adr(obj, obj, offset);

  // We cannot store into a non-larval object, so obj must not be an InlineTypeNode
  assert(!obj->is_InlineType(), "InlineTypeNodes are non-larval value objects");
  if (field->is_null_free() && field->type()->as_inline_klass()->is_empty() && (!method()->is_object_constructor() || field->is_flat())) {
    // Storing to a field of an empty, null-free inline type that is already initialized. Ignore.
    return;
  } else if (field->is_flat()) {
    // Storing to a flat inline type field.
    ciInlineKlass* vk = field->type()->as_inline_klass();
    if (!val->is_InlineType()) {
      assert(gvn().type(val) == TypePtr::NULL_PTR, "Unexpected value");
      val = InlineTypeNode::make_null(gvn(), vk);
    }
    inc_sp(1);
    bool is_immutable = field->is_final() && field->is_strict();
    bool atomic = vk->must_be_atomic() || !field->is_null_free();
    val->as_InlineType()->store_flat(this, obj, adr, atomic, is_immutable, field->is_null_free(), IN_HEAP | MO_UNORDERED);
    dec_sp(1);
  } else {
    // Store the value.
    const Type* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      if (is_reference_type(bt)) {
        field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      } else {
        field_type = Type::BOTTOM;
      }
    }

    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;
    inc_sp(1);
    access_store_at(obj, adr, adr_type, val, field_type, bt, decorators);
    dec_sp(1);
  }

  if (is_field) {
    // Remember we wrote a volatile field.
    // On CPUs that are not multi-copy atomic (e.g. ppc64), a barrier should be
    // issued in constructors which have such stores. See do_exits() in parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // If the field is @Stable, we can be in any method, but we only care about
    // constructors at this point.
    //
    // Note the presence of writes to final/@Stable non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    if (field->is_final() || field->is_stable()) {
      if (field->is_final()) {
        set_wrote_final(true);
      }
      if (field->is_stable()) {
        set_wrote_stable(true);
      }
      if (AllocateNode::Ideal_allocation(obj) != nullptr) {
        // Preserve allocation ptr to create precedent edge to it in membar
        // generated on exit from constructor.
        set_alloc_with_final_or_stable(obj);
      }
    }
  }
}

//=============================================================================

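// Parse the anewarray bytecode: allocate a one-dimensional array whose
// element type is the resolved reference (or inline) klass.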
void Parse::do_newarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon trap if the element class is not loaded: we need the loaded
  // class for the rest of the graph, but we must not initialize the
  // container class (see the Java spec)!
  assert(will_link, "newarray: typeflow responsibility");

  ciArrayKlass* array_klass = ciArrayKlass::make(klass);

  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  } else if (array_klass->element_klass() != nullptr &&
             array_klass->element_klass()->is_inlinetype() &&
             !array_klass->element_klass()->as_inline_klass()->is_initialized()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  nullptr);
    return;
  }

  kill_dead_locals();

  const TypeAryKlassPtr* array_klass_type = TypeAryKlassPtr::make(array_klass, Type::trust_interfaces);
  array_klass_type = array_klass_type->cast_to_refined_array_klass_ptr();
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


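// Parse the newarray bytecode: allocate a one-dimensional array of the given
// primitive element type.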
void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node*   count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node*   obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != nullptr, "");
  const TypeAryKlassPtr* array_klass_type = TypeAryKlassPtr::make(array_klass, Type::trust_interfaces);
  array_klass_type = array_klass_type->cast_to_refined_array_klass_ptr();
  Node* array = new_array(makecon(array_klass_type), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr*    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node*    eaddr  = basic_plus_adr(array, offset);
      access_store_at(array, eaddr, adr_type, elem, elemtype, T_OBJECT, IN_HEAP | IS_ARRAY);
    }
  }
  return array;
}

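// Parse the multianewarray bytecode: allocate a multi-dimensional array.
// Small constant dimensions are expanded into nested [a]newarray allocations;
// otherwise the allocation is done by a call into the runtime.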
void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = nullptr;  // terminating null for make_runtime_call
  int j;
  ciKlass* elem_klass = array_klass;
  for (j = ndimensions-1; j >= 0; j--) {
    length[j] = pop();
    elem_klass = elem_klass->as_array_klass()->element_klass();
  }
  if (elem_klass != nullptr && elem_klass->is_inlinetype() && !elem_klass->as_inline_klass()->is_initialized()) {
    inc_sp(ndimensions);
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  nullptr);
    return;
  }

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int64_t expand_count = 1;        // count of allocations in the expansion
  int64_t expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    // To prevent overflow, we use 64-bit values.  Alternatively,
    // we could clamp dim_con like so:
    // dim_con = MIN2(dim_con, expand_limit);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use [a]newarray instead of multianewarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = nullptr;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for a one-dimensional multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when the AllocateArray node for the newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } // original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = nullptr;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  };
  Node* c = nullptr;

  if (fun != nullptr) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, nullptr, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : nullptr,
                          (ndimensions > 3) ? length[4] : nullptr);
  } else {
    // Create a java array for dimension sizes
    Node* dims = nullptr;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill it in with the length values
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), nullptr, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)),
                          dims);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass, Type::trust_interfaces);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != nullptr) {
    type = type->is_aryptr()->cast_to_size(ltype);
  }

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform(new CheckCastPPNode(control(), res, type));
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}