src/hotspot/share/c1/c1_GraphBuilder.cpp

   1 /*
   2  * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "c1/c1_Canonicalizer.hpp"
  26 #include "c1/c1_CFGPrinter.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_GraphBuilder.hpp"

  29 #include "c1/c1_InstructionPrinter.hpp"

  30 #include "ci/ciCallSite.hpp"
  31 #include "ci/ciField.hpp"


  32 #include "ci/ciKlass.hpp"
  33 #include "ci/ciMemberName.hpp"
  34 #include "ci/ciSymbols.hpp"
  35 #include "ci/ciUtilities.inline.hpp"
  36 #include "classfile/javaClasses.hpp"
  37 #include "compiler/compilationPolicy.hpp"
  38 #include "compiler/compileBroker.hpp"
  39 #include "compiler/compilerEvent.hpp"
  40 #include "interpreter/bytecode.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "memory/resourceArea.hpp"

  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/checkedCast.hpp"
  45 #include "utilities/macros.hpp"
  46 #if INCLUDE_JFR
  47 #include "jfr/jfr.hpp"
  48 #endif
  49 
  50 class BlockListBuilder {
  51  private:
  52   Compilation* _compilation;
  53   IRScope*     _scope;
  54 
  55   BlockList    _blocks;                // internal list of all blocks
  56   BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder
  57   GrowableArray<BlockList> _bci2block_successors; // Mapping from bcis to their blocks' successors while we don't have a BlockEnd yet
  58 
  59   // fields used by mark_loops
  60   ResourceBitMap _active;              // for iteration of control flow graph
  61   ResourceBitMap _visited;             // for iteration of control flow graph
  62   GrowableArray<ResourceBitMap> _loop_map; // caches the information if a block is contained in a loop

1030       // they are using this local. We don't handle skipping over a
1031       // ret.
1032       for (ScopeData* cur_scope_data = scope_data()->parent();
1033            cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
1034            cur_scope_data = cur_scope_data->parent()) {
1035         if (cur_scope_data->jsr_return_address_local() == index) {
1036           BAILOUT("subroutine overwrites return address from previous subroutine");
1037         }
1038       }
1039     } else if (index == scope_data()->jsr_return_address_local()) {
1040       scope_data()->set_jsr_return_address_local(-1);
1041     }
1042   }
1043 
1044   state->store_local(index, x);
1045 }
1046 
1047 
1048 void GraphBuilder::load_indexed(BasicType type) {
1049   // In case of in-block code motion in range check elimination
1050   ValueStack* state_before = copy_state_indexed_access();
1051   compilation()->set_has_access_indexed(true);
1052   Value index = ipop();
1053   Value array = apop();
1054   Value length = nullptr;
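       // An explicit ArrayLength is appended when the length is statically
       // known (constant array, constant-field array, or a NewArray with a
       // constant length), so range check elimination can reuse one value.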
1055   if (CSEArrayLength ||
1056       (array->as_Constant() != nullptr) ||
1057       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
1058       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
1059       (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
1060     length = append(new ArrayLength(array, state_before));
1061   }
1062   push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before)));
1063 }
1064 
1065 
1066 void GraphBuilder::store_indexed(BasicType type) {
1067   // In case of in-block code motion in range check elimination
1068   ValueStack* state_before = copy_state_indexed_access();
1069   compilation()->set_has_access_indexed(true);
1070   Value value = pop(as_ValueType(type));
1071   Value index = ipop();
1072   Value array = apop();
1073   Value length = nullptr;
1074   if (CSEArrayLength ||
1075       (array->as_Constant() != nullptr) ||
1076       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
1077       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
1078       (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
1079     length = append(new ArrayLength(array, state_before));
1080   }
1081   ciType* array_type = array->declared_type();
1082   bool check_boolean = false;
1083   if (array_type != nullptr) {
1084     if (array_type->is_loaded() &&
1085       array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) {
1086       assert(type == T_BYTE, "boolean store uses bastore");
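           // bastore is also used for boolean arrays; masking with 1 keeps
           // only the low bit so the canonical values 0 and 1 are stored.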
1087       Value mask = append(new Constant(new IntConstant(1)));
1088       value = append(new LogicOp(Bytecodes::_iand, value, mask));
1089     }
1090   } else if (type == T_BYTE) {
1091     check_boolean = true;
1092   }
1093   StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before, check_boolean);
1094   append(result);
1095   _memory->store_value(value);
1096 
1097   if (type == T_OBJECT && is_profiling()) {
1098     // Note that we'd collect profile data in this method if we wanted it.
1099     compilation()->set_would_profile(true);
1100 
1101     if (profile_checkcasts()) {
1102       result->set_profiled_method(method());
1103       result->set_profiled_bci(bci());
1104       result->set_should_profile(true);
1105     }
1106   }
1107 }
1108 
1109 
1110 void GraphBuilder::stack_op(Bytecodes::Code code) {
1111   switch (code) {
1112     case Bytecodes::_pop:
1113       { state()->raw_pop();
1114       }
1115       break;
1116     case Bytecodes::_pop2:
1117       { state()->raw_pop();
1118         state()->raw_pop();
1119       }
1120       break;
1121     case Bytecodes::_dup:
1122       { Value w = state()->raw_pop();
1123         state()->raw_push(w);
1124         state()->raw_push(w);
1125       }
1126       break;
1127     case Bytecodes::_dup_x1:
1128       { Value w1 = state()->raw_pop();
1129         Value w2 = state()->raw_pop();
1130         state()->raw_push(w1);
1131         state()->raw_push(w2);
1132         state()->raw_push(w1);
1133       }
1134       break;
1135     case Bytecodes::_dup_x2:
1136       { Value w1 = state()->raw_pop();
1137         Value w2 = state()->raw_pop();
1138         Value w3 = state()->raw_pop();

1274 
1275 
1276 void GraphBuilder::_goto(int from_bci, int to_bci) {
1277   Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
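       // to_bci <= from_bci means a backward branch (a potential loop
       // backedge), so the Goto is created as a safepoint.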
1278   if (is_profiling()) {
1279     compilation()->set_would_profile(true);
1280     x->set_profiled_bci(bci());
1281     if (profile_branches()) {
1282       x->set_profiled_method(method());
1283       x->set_should_profile(true);
1284     }
1285   }
1286   append(x);
1287 }
1288 
1289 
1290 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
1291   BlockBegin* tsux = block_at(stream()->get_dest());
1292   BlockBegin* fsux = block_at(stream()->next_bci());
1293   bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
1294   // In case of loop invariant code motion or predicate insertion
1295   // before the body of a loop, the state is needed
1296   Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : nullptr, is_bb));
1297 
1298   assert(i->as_Goto() == nullptr ||
1299          (i->as_Goto()->sux_at(0) == tsux  && i->as_Goto()->is_safepoint() == (tsux->bci() < stream()->cur_bci())) ||
1300          (i->as_Goto()->sux_at(0) == fsux  && i->as_Goto()->is_safepoint() == (fsux->bci() < stream()->cur_bci())),
1301          "safepoint state of Goto returned by canonicalizer incorrect");
1302 
1303   if (is_profiling()) {
1304     If* if_node = i->as_If();
1305     if (if_node != nullptr) {
1306       // Note that we'd collect profile data in this method if we wanted it.
1307       compilation()->set_would_profile(true);
1308       // At level 2 we need the proper bci to count backedges
1309       if_node->set_profiled_bci(bci());
1310       if (profile_branches()) {
1311         // Successors can be rotated by the canonicalizer, check for this case.
1312         if_node->set_profiled_method(method());
1313         if_node->set_should_profile(true);
1314         if (if_node->tsux() == fsux) {
1315           if_node->set_swapped(true);
1316         }

1531   }
1532 
1533   if (needs_check) {
1534     // Perform the registration of finalizable objects.
1535     ValueStack* state_before = copy_state_for_exception();
1536     load_local(objectType, 0);
1537     append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
1538                                state()->pop_arguments(1),
1539                                true, state_before, true));
1540   }
1541 }
1542 
1543 
1544 void GraphBuilder::method_return(Value x, bool ignore_return) {
1545   if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1546     call_register_finalizer();
1547   }
1548 
1549   // The conditions for a memory barrier are described in Parse::do_exits().
1550   bool need_mem_bar = false;
1551   if (method()->name() == ciSymbols::object_initializer_name() &&
1552        (scope()->wrote_final() || scope()->wrote_stable() ||
1553          (AlwaysSafeConstructors && scope()->wrote_fields()) ||
1554          (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) {
1555     need_mem_bar = true;
1556   }
1557 
1558   BasicType bt = method()->return_type()->basic_type();
1559   switch (bt) {
1560     case T_BYTE:
1561     {
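           // The shift pair sign-extends the low byte, implementing the
           // implicit narrowing of the int return value to byte: e.g. for
           // x == 0x1FF, (0x1FF << 24) == 0xFF000000 and the arithmetic
           // shift back yields -1 == (byte)0x1FF.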
1562       Value shift = append(new Constant(new IntConstant(24)));
1563       x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
1564       x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
1565       break;
1566     }
1567     case T_SHORT:
1568     {
1569       Value shift = append(new Constant(new IntConstant(16)));
1570       x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
1571       x = append(new ShiftOp(Bytecodes::_ishr, x, shift));

1682   // Attach dimension info to stable arrays.
1683   if (FoldStableValues &&
1684       field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) {
1685     ciArray* array = field_value.as_object()->as_array();
1686     jint dimension = field->type()->as_array_klass()->dimension();
1687     value = new StableArrayConstant(array, dimension);
1688   }
1689 
1690   switch (field_type) {
1691     case T_ARRAY:
1692     case T_OBJECT:
1693       if (field_value.as_object()->should_be_constant()) {
1694         return new Constant(value);
1695       }
1696       return nullptr; // Not a constant.
1697     default:
1698       return new Constant(value);
1699   }
1700 }
1701 
1702 void GraphBuilder::access_field(Bytecodes::Code code) {
1703   bool will_link;
1704   ciField* field = stream()->get_field(will_link);
1705   ciInstanceKlass* holder = field->holder();
1706   BasicType field_type = field->type()->basic_type();
1707   ValueType* type = as_ValueType(field_type);

1708   // call will_link again to determine if the field is valid.
1709   const bool needs_patching = !holder->is_loaded() ||
1710                               !field->will_link(method(), code) ||
1711                               PatchALot;
1712 
1713   ValueStack* state_before = nullptr;
1714   if (!holder->is_initialized() || needs_patching) {
1715     // save state before instruction for debug info when
1716     // deoptimization happens during patching
1717     state_before = copy_state_before();
1718   }
1719 
1720   Value obj = nullptr;
1721   if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
1722     if (state_before != nullptr) {
1723       // build a patching constant
1724       obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
1725     } else {
1726       obj = new Constant(new InstanceConstant(holder->java_mirror()));
1727     }
1728   }
1729 
1730   if (code == Bytecodes::_putfield) {
1731     scope()->set_wrote_fields();
1732     if (field->is_volatile()) {
1733       scope()->set_wrote_volatile();
1734     }
1735     if (field->is_final()) {
1736       scope()->set_wrote_final();
1737     }
1738     if (field->is_stable()) {
1739       scope()->set_wrote_stable();
1740     }
1741   }
1742 
1743   const int offset = !needs_patching ? field->offset_in_bytes() : -1;
1744   switch (code) {
1745     case Bytecodes::_getstatic: {
1746       // check for compile-time constants, i.e., initialized static final fields
1747       Value constant = nullptr;
1748       if (field->is_static_constant() && !PatchALot) {
1749         ciConstant field_value = field->constant_value();
1750         assert(!field->is_stable() || !field_value.is_null_or_zero(),
1751                "stable static w/ default value shouldn't be a constant");
1752         constant = make_constant(field_value, field);
1753       }
1754       if (constant != nullptr) {
1755         push(type, append(constant));
1756       } else {
1757         if (state_before == nullptr) {
1758           state_before = copy_state_for_exception();
1759         }
1760         push(type, append(new LoadField(append(obj), offset, field, true,
1761                                         state_before, needs_patching)));

1762       }
1763       break;
1764     }
1765     case Bytecodes::_putstatic: {
1766       Value val = pop(type);
1767       if (state_before == nullptr) {
1768         state_before = copy_state_for_exception();
1769       }
1770       if (field->type()->basic_type() == T_BOOLEAN) {
1771         Value mask = append(new Constant(new IntConstant(1)));
1772         val = append(new LogicOp(Bytecodes::_iand, val, mask));
1773       }
1774       append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
1775       break;
1776     }
1777     case Bytecodes::_getfield: {
1778       // Check for compile-time constants, i.e., trusted final non-static fields.
1779       Value constant = nullptr;
1780       obj = apop();
1781       ObjectType* obj_type = obj->type()->as_ObjectType();
1782       if (field->is_constant() && obj_type->is_constant() && !PatchALot) {
1783         ciObject* const_oop = obj_type->constant_value();
1784         if (!const_oop->is_null_object() && const_oop->is_loaded()) {
1785           ciConstant field_value = field->constant_value_of(const_oop);
1786           if (field_value.is_valid()) {
1787             constant = make_constant(field_value, field);
1788             // For CallSite objects add a dependency for invalidation of the optimization.
1789             if (field->is_call_site_target()) {
1790               ciCallSite* call_site = const_oop->as_call_site();
1791               if (!call_site->is_fully_initialized_constant_call_site()) {
1792                 ciMethodHandle* target = field_value.as_object()->as_method_handle();
1793                 dependency_recorder()->assert_call_site_target_value(call_site, target);
1794               }
1795             }
1796           }
1797         }
1798       }
1799       if (constant != nullptr) {
1800         push(type, append(constant));
1801       } else {
1802         if (state_before == nullptr) {
1803           state_before = copy_state_for_exception();
1804         }
1805         LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
1806         Value replacement = !needs_patching ? _memory->load(load) : load;
1807         if (replacement != load) {
1808           assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
1809           // Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing
1810           // conversion. Emit an explicit conversion here to get the correct field value after the write.
1811           BasicType bt = field->type()->basic_type();
1812           switch (bt) {
1813           case T_BOOLEAN:
1814           case T_BYTE:
1815             replacement = append(new Convert(Bytecodes::_i2b, replacement, as_ValueType(bt)));
1816             break;
1817           case T_CHAR:
1818             replacement = append(new Convert(Bytecodes::_i2c, replacement, as_ValueType(bt)));
1819             break;
1820           case T_SHORT:
1821             replacement = append(new Convert(Bytecodes::_i2s, replacement, as_ValueType(bt)));
1822             break;
1823           default:
1824             break;
1825           }
1826           push(type, replacement);
1827         } else {
1828           push(type, append(load));
1829         }
1830       }
1831       break;
1832     }
1833     case Bytecodes::_putfield: {
1834       Value val = pop(type);
1835       obj = apop();
1836       if (state_before == nullptr) {
1837         state_before = copy_state_for_exception();
1838       }
1839       if (field->type()->basic_type() == T_BOOLEAN) {
1840         Value mask = append(new Constant(new IntConstant(1)));
1841         val = append(new LogicOp(Bytecodes::_iand, val, mask));
1842       }
1843       StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching);
1844       if (!needs_patching) store = _memory->store(store);
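           // The memory buffer returns nullptr when it proves the store
           // redundant (the field is already known to hold this value), in
           // which case no StoreField is appended.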
1845       if (store != nullptr) {
1846         append(store);
1847       }
1848       break;
1849     }
1850     default:
1851       ShouldNotReachHere();
1852       break;
1853   }
1854 }
1855 
1856 
1857 Dependencies* GraphBuilder::dependency_recorder() const {
1858   return compilation()->dependency_recorder();
1859 }
1860 
1861 // How many arguments do we want to profile?
1862 Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) {
1863   int n = 0;
1864   bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
1865   start = has_receiver ? 1 : 0;
1866   if (profile_arguments()) {
1867     ciProfileData* data = method()->method_data()->bci_to_data(bci());
1868     if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
1869       n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
1870     }
1871   }
1872   // If we are inlining then we need to collect arguments to profile parameters for the target
1873   if (profile_parameters() && target != nullptr) {
1874     if (target->method_data() != nullptr && target->method_data()->parameters_type_data() != nullptr) {
1875       // The receiver is profiled on method entry so it's included in
1876       // the number of parameters but here we're only interested in

1952       break;
1953     case Bytecodes::_invokehandle:
1954       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
1955       break;
1956     default:
1957       break;
1958     }
1959   } else {
1960     if (bc_raw == Bytecodes::_invokehandle) {
1961       assert(!will_link, "should come here only for unlinked call");
1962       code = Bytecodes::_invokespecial;
1963     }
1964   }
1965 
1966   if (code == Bytecodes::_invokespecial) {
1967     // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
1968     ciKlass* receiver_constraint = nullptr;
1969 
1970     if (bc_raw == Bytecodes::_invokeinterface) {
1971       receiver_constraint = holder;
1972     } else if (bc_raw == Bytecodes::_invokespecial && !target->is_object_initializer() && calling_klass->is_interface()) {
1973       receiver_constraint = calling_klass;
1974     }
1975 
1976     if (receiver_constraint != nullptr) {
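           // The receiver sits below the pushed arguments, at depth
           // arg_size_no_receiver() + 1 from the top of the stack.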
1977       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
1978       Value receiver = state()->stack_at(index);
1979       CheckCast* c = new CheckCast(receiver_constraint, receiver, copy_state_before());
1980       // go to uncommon_trap when checkcast fails
1981       c->set_invokespecial_receiver_check();
1982       state()->stack_at_put(index, append_split(c));
1983     }
1984   }
1985 
1986   // Push appendix argument (MethodType, CallSite, etc.), if there is one.
1987   bool patch_for_appendix = false;
1988   int patching_appendix_arg = 0;
1989   if (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot)) {
1990     Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
1991     apush(arg);
1992     patch_for_appendix = true;

2142       }
2143     } else {
2144       print_inlining(target, "no static binding", /*success*/ false);
2145     }
2146   } else {
2147     print_inlining(target, "not inlineable", /*success*/ false);
2148   }
2149 
2150   // If we attempted an inline which did not succeed because of a
2151   // bailout during construction of the callee graph, the entire
2152   // compilation has to be aborted. This is fairly rare and currently
2153   // seems to only occur for jasm-generated classes which contain
2154   // jsr/ret pairs which are not associated with finally clauses and
2155   // do not have exception handlers in the containing method, and are
2156   // therefore not caught early enough to abort the inlining without
2157   // corrupting the graph. (We currently bail out with a non-empty
2158   // stack at a ret in these situations.)
2159   CHECK_BAILOUT();
2160 
2161   // inlining not successful => standard invoke
2162   ValueType* result_type = as_ValueType(declared_signature->return_type());
2163   ValueStack* state_before = copy_state_exhandling();
2164 
2165   // The bytecode (code) might change in this method so we are checking this very late.
2166   const bool has_receiver =
2167     code == Bytecodes::_invokespecial   ||
2168     code == Bytecodes::_invokevirtual   ||
2169     code == Bytecodes::_invokeinterface;
2170   Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
2171   Value recv = has_receiver ? apop() : nullptr;
2172 
2173   // A null check is required here (when there is a receiver) for any of the following cases
2174   // - invokespecial, always need a null check.
2175   // - invokevirtual, when the target is final and loaded. Calls to final targets will become optimized
2176   //   and require null checking. If the target is loaded a null check is emitted here.
2177   //   If the target isn't loaded the null check must happen after the call resolution. We achieve that
2178   //   by using the target method's unverified entry point (see CompiledIC::compute_monomorphic_entry).
2179   //   (The JVM specification requires that LinkageError must be thrown before a NPE. An unloaded target may
2180   //   potentially fail, and can't have the null check before the resolution.)
2181   // - A call that will be profiled. (But we can't add a null check when the target is unloaded, for the same
2182   //   reason as above, so calls with a receiver to unloaded targets can't be profiled.)

2191       null_check(recv);
2192     }
2193 
2194     if (is_profiling()) {
2195       // Note that we'd collect profile data in this method if we wanted it.
2196       compilation()->set_would_profile(true);
2197 
2198       if (profile_calls()) {
2199         assert(cha_monomorphic_target == nullptr || exact_target == nullptr, "both cannot be set");
2200         ciKlass* target_klass = nullptr;
2201         if (cha_monomorphic_target != nullptr) {
2202           target_klass = cha_monomorphic_target->holder();
2203         } else if (exact_target != nullptr) {
2204           target_klass = exact_target->holder();
2205         }
2206         profile_call(target, recv, target_klass, collect_args_for_profiling(args, nullptr, false), false);
2207       }
2208     }
2209   }
2210 
2211   Invoke* result = new Invoke(code, result_type, recv, args, target, state_before);
2212   // push result
2213   append_split(result);
2214 
2215   if (result_type != voidType) {
2216     push(result_type, result);
2217   }
2218   if (profile_return() && result_type->is_object_kind()) {

2219     profile_return_type(result, target);
2220   }
2221 }
2222 
2223 
2224 void GraphBuilder::new_instance(int klass_index) {
2225   ValueStack* state_before = copy_state_exhandling();
2226   ciKlass* klass = stream()->get_klass();
2227   assert(klass->is_instance_klass(), "must be an instance klass");
2228   NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass());
2229   _memory->new_instance(new_instance);
2230   apush(append_split(new_instance));
2231 }
2232 
2233 
2234 void GraphBuilder::new_type_array() {
2235   ValueStack* state_before = copy_state_exhandling();
2236   apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true)));
2237 }
2238 
2239 
2240 void GraphBuilder::new_object_array() {
2241   ciKlass* klass = stream()->get_klass();
2242   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2243   NewArray* n = new NewObjectArray(klass, ipop(), state_before);
2244   apush(append_split(n));
2245 }
2246 
2247 
2248 bool GraphBuilder::direct_compare(ciKlass* k) {
2249   if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
2250     ciInstanceKlass* ik = k->as_instance_klass();
2251     if (ik->is_final()) {
2252       return true;
2253     } else {

2286   ciKlass* klass = stream()->get_klass();
2287   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2288   InstanceOf* i = new InstanceOf(klass, apop(), state_before);
2289   ipush(append_split(i));
2290   i->set_direct_compare(direct_compare(klass));
2291 
2292   if (is_profiling()) {
2293     // Note that we'd collect profile data in this method if we wanted it.
2294     compilation()->set_would_profile(true);
2295 
2296     if (profile_checkcasts()) {
2297       i->set_profiled_method(method());
2298       i->set_profiled_bci(bci());
2299       i->set_should_profile(true);
2300     }
2301   }
2302 }
2303 
2304 
2305 void GraphBuilder::monitorenter(Value x, int bci) {
2306   // save state before locking in case of deoptimization after a NullPointerException
2307   ValueStack* state_before = copy_state_for_exception_with_bci(bci);
2308   append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci);
2309   kill_all();
2310 }
2311 
2312 
2313 void GraphBuilder::monitorexit(Value x, int bci) {
2314   append_with_bci(new MonitorExit(x, state()->unlock()), bci);
2315   kill_all();
2316 }
2317 
2318 
2319 void GraphBuilder::new_multi_array(int dimensions) {
2320   ciKlass* klass = stream()->get_klass();
2321   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2322 
2323   Values* dims = new Values(dimensions, dimensions, nullptr);
2324   // fill in all dimensions
2325   int i = dimensions;
2326   while (i-- > 0) dims->at_put(i, ipop());
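       // multianewarray pushes the first (outermost) dimension count first,
       // so the top of stack holds the last dimension; filling dims from the
       // highest index down restores declaration order.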
2327   // create array
2328   NewArray* n = new NewMultiArray(klass, dims, state_before);

2413 
2414 Instruction* GraphBuilder::append_split(StateSplit* instr) {
2415   return append_with_bci(instr, bci());
2416 }
2417 
2418 
2419 void GraphBuilder::null_check(Value value) {
2420   if (value->as_NewArray() != nullptr || value->as_NewInstance() != nullptr) {
2421     return;
2422   } else {
2423     Constant* con = value->as_Constant();
2424     if (con) {
2425       ObjectType* c = con->type()->as_ObjectType();
2426       if (c && c->is_loaded()) {
2427         ObjectConstant* oc = c->as_ObjectConstant();
2428         if (!oc || !oc->value()->is_null_object()) {
2429           return;
2430         }
2431       }
2432     }

2433   }
2434   append(new NullCheck(value, copy_state_for_exception()));
2435 }
2436 
2437 
2438 
2439 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
2440   if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != nullptr)) {
2441     assert(instruction->exception_state() == nullptr
2442            || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
2443            || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()),
2444            "exception_state should be of exception kind");
2445     return new XHandlers();
2446   }
2447 
2448   XHandlers*  exception_handlers = new XHandlers();
2449   ScopeData*  cur_scope_data = scope_data();
2450   ValueStack* cur_state = instruction->state_before();
2451   ValueStack* prev_state = nullptr;
2452   int scope_count = 0;
2453 
2454   assert(cur_state != nullptr, "state_before must be set");
2455   do {
2456     int cur_bci = cur_state->bci();
2457     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
2458     assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci");


2459 
2460     // join with all potential exception handlers
2461     XHandlers* list = cur_scope_data->xhandlers();
2462     const int n = list->length();
2463     for (int i = 0; i < n; i++) {
2464       XHandler* h = list->handler_at(i);
2465       if (h->covers(cur_bci)) {
2466         // h is a potential exception handler => join it
2467         compilation()->set_has_exception_handlers(true);
2468 
2469         BlockBegin* entry = h->entry_block();
2470         if (entry == block()) {
2471           // It's acceptable for an exception handler to cover itself
2472           // but we don't handle that in the parser currently.  It's
2473           // very rare so we bail out instead of trying to handle it.
2474           BAILOUT_("exception handler covers itself", exception_handlers);
2475         }
2476         assert(entry->bci() == h->handler_bci(), "must match");
2477         assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
2478 

3252     state->store_local(idx, new Local(type, vt, idx, false));
3253     idx += type->size();
3254   }
3255 
3256   // lock synchronized method
3257   if (method()->is_synchronized()) {
3258     state->lock(nullptr);
3259   }
3260 
3261   return state;
3262 }
3263 
3264 
3265 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
3266   : _scope_data(nullptr)
3267   , _compilation(compilation)
3268   , _memory(new MemoryBuffer())
3269   , _inline_bailout_msg(nullptr)
3270   , _instruction_count(0)
3271   , _osr_entry(nullptr)


3272 {
3273   int osr_bci = compilation->osr_bci();
3274 
3275   // determine entry points and bci2block mapping
3276   BlockListBuilder blm(compilation, scope, osr_bci);
3277   CHECK_BAILOUT();
3278 
3279   BlockList* bci2block = blm.bci2block();
3280   BlockBegin* start_block = bci2block->at(0);
3281 
3282   push_root_scope(scope, bci2block, start_block);
3283 
3284   // setup state for std entry
3285   _initial_state = state_at_entry();
3286   start_block->merge(_initial_state, compilation->has_irreducible_loops());
3287 
3288   // End nulls still exist here
3289 
3290   // complete graph
3291   _vmap        = new ValueMap();

4004   // Temporarily set up bytecode stream so we can append instructions
4005   // (only using the bci of this stream)
4006   scope_data()->set_stream(scope_data()->parent()->stream());
4007 
4008   // Pass parameters into callee state: add assignments
4009   // note: this will also ensure that all arguments are computed before being passed
4010   ValueStack* callee_state = state();
4011   ValueStack* caller_state = state()->caller_state();
4012   for (int i = args_base; i < caller_state->stack_size(); ) {
4013     const int arg_no = i - args_base;
4014     Value arg = caller_state->stack_at_inc(i);
4015     store_local(callee_state, arg, arg_no);
4016   }
4017 
4018   // Remove args from stack.
4019   // Note that we preserve locals state in case we can use it later
4020   // (see use of pop_scope() below)
4021   caller_state->truncate_stack(args_base);
4022   assert(callee_state->stack_size() == 0, "callee stack must be empty");
4023 
4024   Value lock = nullptr;
4025   BlockBegin* sync_handler = nullptr;
4026 
4027   // Inline the locking of the receiver if the callee is synchronized
4028   if (callee->is_synchronized()) {
4029     lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
4030                                : state()->local_at(0);
4031     sync_handler = new BlockBegin(SynchronizationEntryBCI);
4032     inline_sync_entry(lock, sync_handler);
4033   }
4034 
4035   if (compilation()->env()->dtrace_method_probes()) {
4036     Values* args = new Values(1);
4037     args->push(append(new Constant(new MethodConstant(method()))));
4038     append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
4039   }
4040 
4041   if (profile_inlined_calls()) {
4042     profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI));
4043   }

   1 /*
   2  * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "c1/c1_Canonicalizer.hpp"
  26 #include "c1/c1_CFGPrinter.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_GraphBuilder.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_InstructionPrinter.hpp"
  31 #include "c1/c1_ValueType.hpp"
  32 #include "ci/ciCallSite.hpp"
  33 #include "ci/ciField.hpp"
  34 #include "ci/ciFlatArrayKlass.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciKlass.hpp"
  37 #include "ci/ciMemberName.hpp"
  38 #include "ci/ciSymbols.hpp"
  39 #include "ci/ciUtilities.inline.hpp"
  40 #include "classfile/javaClasses.hpp"
  41 #include "compiler/compilationPolicy.hpp"
  42 #include "compiler/compileBroker.hpp"
  43 #include "compiler/compilerEvent.hpp"
  44 #include "interpreter/bytecode.hpp"
  45 #include "jfr/jfrEvents.hpp"
  46 #include "memory/resourceArea.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "utilities/checkedCast.hpp"
  50 #include "utilities/macros.hpp"
  51 #if INCLUDE_JFR
  52 #include "jfr/jfr.hpp"
  53 #endif
  54 
  55 class BlockListBuilder {
  56  private:
  57   Compilation* _compilation;
  58   IRScope*     _scope;
  59 
  60   BlockList    _blocks;                // internal list of all blocks
  61   BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder
  62   GrowableArray<BlockList> _bci2block_successors; // Mapping from bcis to their blocks' successors while we don't have a BlockEnd yet
  63 
  64   // fields used by mark_loops
  65   ResourceBitMap _active;              // for iteration of control flow graph
  66   ResourceBitMap _visited;             // for iteration of control flow graph
  67   GrowableArray<ResourceBitMap> _loop_map; // caches the information if a block is contained in a loop

1035       // they are using this local. We don't handle skipping over a
1036       // ret.
1037       for (ScopeData* cur_scope_data = scope_data()->parent();
1038            cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
1039            cur_scope_data = cur_scope_data->parent()) {
1040         if (cur_scope_data->jsr_return_address_local() == index) {
1041           BAILOUT("subroutine overwrites return address from previous subroutine");
1042         }
1043       }
1044     } else if (index == scope_data()->jsr_return_address_local()) {
1045       scope_data()->set_jsr_return_address_local(-1);
1046     }
1047   }
1048 
1049   state->store_local(index, x);
1050 }
1051 
1052 
1053 void GraphBuilder::load_indexed(BasicType type) {
1054   // In case of in-block code motion in range check elimination
1055   ValueStack* state_before = nullptr;
1056   int array_idx = state()->stack_size() - 2;
1057   if (type == T_OBJECT && state()->stack_at(array_idx)->maybe_flat_array()) {
1058     // Save the entire state and re-execute on deopt when accessing flat arrays
1059     state_before = copy_state_before();
1060     state_before->set_should_reexecute(true);
1061   } else {
1062     state_before = copy_state_indexed_access();
1063   }
1064   compilation()->set_has_access_indexed(true);
1065   Value index = ipop();
1066   Value array = apop();
1067   Value length = nullptr;
1068   if (CSEArrayLength ||
1069       (array->as_Constant() != nullptr) ||
1070       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
1071       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
1072       (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
1073     length = append(new ArrayLength(array, state_before));
1074   }
1075 
1076   bool need_membar = false;
1077   LoadIndexed* load_indexed = nullptr;
1078   Instruction* result = nullptr;
1079   if (array->is_loaded_flat_array()) {
1080     // TODO 8350865 This is currently dead code. Can we use set_null_free on the result here if the array is null-free?
1081     ciType* array_type = array->declared_type();
1082     ciInlineKlass* elem_klass = array_type->as_flat_array_klass()->element_klass()->as_inline_klass();
1083 
1084     bool can_delay_access = false;
1085     ciBytecodeStream s(method());
1086     s.force_bci(bci());
1087     s.next();
1088     if (s.cur_bc() == Bytecodes::_getfield) {
1089       bool will_link;
1090       ciField* next_field = s.get_field(will_link);
1091       bool next_needs_patching = !next_field->holder()->is_initialized() ||
1092                                  !next_field->will_link(method(), Bytecodes::_getfield) ||
1093                                  PatchALot;
1094       can_delay_access = C1UseDelayedFlattenedFieldReads && !next_needs_patching;
1095     }
1096     if (can_delay_access) {
1097       // potentially optimizable array access, storing information for delayed decision
1098       LoadIndexed* li = new LoadIndexed(array, index, length, type, state_before);
1099       DelayedLoadIndexed* dli = new DelayedLoadIndexed(li, state_before);
1100       li->set_delayed(dli);
1101       set_pending_load_indexed(dli);
1102       return; // Nothing else to do for now
1103     } else {
1104       NewInstance* new_instance = new NewInstance(elem_klass, state_before, false, true);
1105       _memory->new_instance(new_instance);
1106       apush(append_split(new_instance));
1107       load_indexed = new LoadIndexed(array, index, length, type, state_before);
1108       load_indexed->set_vt(new_instance);
1109       // The LoadIndexed node will initialize this instance by copying from
1110       // the flat field.  Ensure these stores are visible before any
1111       // subsequent store that publishes this reference.
1112       need_membar = true;
1113     }
1114   } else {
1115     load_indexed = new LoadIndexed(array, index, length, type, state_before);
1116     if (profile_array_accesses() && is_reference_type(type)) {
1117       compilation()->set_would_profile(true);
1118       load_indexed->set_should_profile(true);
1119       load_indexed->set_profiled_method(method());
1120       load_indexed->set_profiled_bci(bci());
1121     }
1122   }
1123   result = append(load_indexed);
1124   if (need_membar) {
1125     append(new MemBar(lir_membar_storestore));
1126   }
1127   assert(!load_indexed->should_profile() || load_indexed == result, "should not be optimized out");
1128   if (!array->is_loaded_flat_array()) {
1129     push(as_ValueType(type), result);
1130   }
1131 }
1132 
1133 
1134 void GraphBuilder::store_indexed(BasicType type) {
1135   // In case of in-block code motion in range check elimination
1136   ValueStack* state_before = nullptr;
1137   int array_idx = state()->stack_size() - 3;
1138   if (type == T_OBJECT && state()->stack_at(array_idx)->maybe_flat_array()) {
1139     // Save the entire state and re-execute on deopt when accessing flat arrays
1140     state_before = copy_state_before();
1141     state_before->set_should_reexecute(true);
1142   } else {
1143     state_before = copy_state_indexed_access();
1144   }
1145   compilation()->set_has_access_indexed(true);
1146   Value value = pop(as_ValueType(type));
1147   Value index = ipop();
1148   Value array = apop();
1149   Value length = nullptr;
1150   if (CSEArrayLength ||
1151       (array->as_Constant() != nullptr) ||
1152       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
1153       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
1154       (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
1155     length = append(new ArrayLength(array, state_before));
1156   }
1157   ciType* array_type = array->declared_type();
1158   bool check_boolean = false;
1159   if (array_type != nullptr) {
1160     if (array_type->is_loaded() &&
1161       array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) {
1162       assert(type == T_BYTE, "boolean store uses bastore");
1163       Value mask = append(new Constant(new IntConstant(1)));
1164       value = append(new LogicOp(Bytecodes::_iand, value, mask));
1165     }
1166   } else if (type == T_BYTE) {
1167     check_boolean = true;
1168   }
1169 
1170   StoreIndexed* store_indexed = new StoreIndexed(array, index, length, type, value, state_before, check_boolean);
1171   if (profile_array_accesses() && is_reference_type(type) && !array->is_loaded_flat_array()) {
1172     compilation()->set_would_profile(true);
1173     store_indexed->set_should_profile(true);
1174     store_indexed->set_profiled_method(method());
1175     store_indexed->set_profiled_bci(bci());
1176   }
1177   Instruction* result = append(store_indexed);
1178   assert(!store_indexed->should_profile() || store_indexed == result, "should not be optimized out");
1179   _memory->store_value(value);
1180 }
1181 

1182 void GraphBuilder::stack_op(Bytecodes::Code code) {
1183   switch (code) {
1184     case Bytecodes::_pop:
1185       { Value w = state()->raw_pop();
1186       }
1187       break;
1188     case Bytecodes::_pop2:
1189       { Value w1 = state()->raw_pop();
1190         Value w2 = state()->raw_pop();
1191       }
1192       break;
1193     case Bytecodes::_dup:
1194       { Value w = state()->raw_pop();
1195         state()->raw_push(w);
1196         state()->raw_push(w);
1197       }
1198       break;
1199     case Bytecodes::_dup_x1:
1200       { Value w1 = state()->raw_pop();
1201         Value w2 = state()->raw_pop();
1202         state()->raw_push(w1);
1203         state()->raw_push(w2);
1204         state()->raw_push(w1);
1205       }
1206       break;
1207     case Bytecodes::_dup_x2:
1208       { Value w1 = state()->raw_pop();
1209         Value w2 = state()->raw_pop();
1210         Value w3 = state()->raw_pop();

1346 
1347 
1348 void GraphBuilder::_goto(int from_bci, int to_bci) {
1349   Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
1350   if (is_profiling()) {
1351     compilation()->set_would_profile(true);
1352     x->set_profiled_bci(bci());
1353     if (profile_branches()) {
1354       x->set_profiled_method(method());
1355       x->set_should_profile(true);
1356     }
1357   }
1358   append(x);
1359 }
1360 
1361 
1362 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
1363   BlockBegin* tsux = block_at(stream()->get_dest());
1364   BlockBegin* fsux = block_at(stream()->next_bci());
1365   bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
1366 
1367   bool subst_check = false;
1368   if (Arguments::is_valhalla_enabled() && (stream()->cur_bc() == Bytecodes::_if_acmpeq || stream()->cur_bc() == Bytecodes::_if_acmpne)) {
1369     ValueType* left_vt = x->type();
1370     ValueType* right_vt = y->type();
1371     if (left_vt->is_object()) {
1372       assert(right_vt->is_object(), "must be");
1373       ciKlass* left_klass = x->as_loaded_klass_or_null();
1374       ciKlass* right_klass = y->as_loaded_klass_or_null();
1375 
1376       if (left_klass == nullptr || right_klass == nullptr) {
1377         // The klass is still unloaded, or came from a Phi node. Take the slow path.
1378         subst_check = true;
1379       } else if (left_klass->can_be_inline_klass() || right_klass->can_be_inline_klass()) {
1380         // Either operand may be a value object, but we're not sure. Take the slow path.
1381         subst_check = true;
1382       } else {
1383         // No need to do substitutability check
1384       }
1385     }
1386   }
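       // When a substitutability check may be needed, acmp cannot be a plain
       // pointer comparison: value objects compare equal when their contents
       // are equal, so the If created below also receives state_before.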
1387   if ((stream()->cur_bc() == Bytecodes::_if_acmpeq || stream()->cur_bc() == Bytecodes::_if_acmpne) &&
1388       is_profiling() && profile_branches()) {
1389     compilation()->set_would_profile(true);
1390     append(new ProfileACmpTypes(method(), bci(), x, y));
1391   }
1392 
1393   // In case of loop invariant code motion or predicate insertion
1394   // before the body of a loop, the state is needed
1395   Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic() || subst_check) ? state_before : nullptr, is_bb, subst_check));
1396 
1397   assert(i->as_Goto() == nullptr ||
1398          (i->as_Goto()->sux_at(0) == tsux  && i->as_Goto()->is_safepoint() == (tsux->bci() < stream()->cur_bci())) ||
1399          (i->as_Goto()->sux_at(0) == fsux  && i->as_Goto()->is_safepoint() == (fsux->bci() < stream()->cur_bci())),
1400          "safepoint state of Goto returned by canonicalizer incorrect");
1401 
1402   if (is_profiling()) {
1403     If* if_node = i->as_If();
1404     if (if_node != nullptr) {
1405       // Note that we'd collect profile data in this method if we wanted it.
1406       compilation()->set_would_profile(true);
1407       // At level 2 we need the proper bci to count backedges
1408       if_node->set_profiled_bci(bci());
1409       if (profile_branches()) {
1410         // Successors can be rotated by the canonicalizer, check for this case.
1411         if_node->set_profiled_method(method());
1412         if_node->set_should_profile(true);
1413         if (if_node->tsux() == fsux) {
1414           if_node->set_swapped(true);
1415         }

1630   }
1631 
1632   if (needs_check) {
1633     // Perform the registration of finalizable objects.
1634     ValueStack* state_before = copy_state_for_exception();
1635     load_local(objectType, 0);
1636     append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
1637                                state()->pop_arguments(1),
1638                                true, state_before, true));
1639   }
1640 }
1641 
1642 
1643 void GraphBuilder::method_return(Value x, bool ignore_return) {
1644   if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1645     call_register_finalizer();
1646   }
1647 
1648   // The conditions for a memory barrier are described in Parse::do_exits().
1649   bool need_mem_bar = false;
1650   if (method()->is_object_constructor() &&
1651        (scope()->wrote_final() || scope()->wrote_stable() ||
1652          (AlwaysSafeConstructors && scope()->wrote_fields()) ||
1653          (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) {
1654     need_mem_bar = true;
1655   }
1656 
1657   BasicType bt = method()->return_type()->basic_type();
1658   switch (bt) {
1659     case T_BYTE:
1660     {
1661       Value shift = append(new Constant(new IntConstant(24)));
1662       x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
1663       x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
1664       break;
1665     }
1666     case T_SHORT:
1667     {
1668       Value shift = append(new Constant(new IntConstant(16)));
1669       x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
1670       x = append(new ShiftOp(Bytecodes::_ishr, x, shift));

1781   // Attach dimension info to stable arrays.
1782   if (FoldStableValues &&
1783       field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) {
1784     ciArray* array = field_value.as_object()->as_array();
1785     jint dimension = field->type()->as_array_klass()->dimension();
1786     value = new StableArrayConstant(array, dimension);
1787   }
1788 
1789   switch (field_type) {
1790     case T_ARRAY:
1791     case T_OBJECT:
1792       if (field_value.as_object()->should_be_constant()) {
1793         return new Constant(value);
1794       }
1795       return nullptr; // Not a constant.
1796     default:
1797       return new Constant(value);
1798   }
1799 }
1800 
1801 void GraphBuilder::copy_inline_content(ciInlineKlass* vk, Value src, int src_off, Value dest, int dest_off, ValueStack* state_before, ciField* enclosing_field) {
1802   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1803     ciField* field = vk->declared_nonstatic_field_at(i);
1804     int offset = field->offset_in_bytes() - vk->payload_offset();
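         // offset_in_bytes() is relative to a standalone heap instance;
         // subtracting payload_offset() rebases it to the flat payload.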
1805     if (field->is_flat()) {
1806       copy_inline_content(field->type()->as_inline_klass(), src, src_off + offset, dest, dest_off + offset, state_before, enclosing_field);
1807       if (!field->is_null_free()) {
1808         // Nullable, copy the null marker using Unsafe because null markers are not real fields
1809         int null_marker_offset = field->null_marker_offset() - vk->payload_offset();
1810         Value offset = append(new Constant(new LongConstant(src_off + null_marker_offset)));
1811         Value nm = append(new UnsafeGet(T_BOOLEAN, src, offset, false));
1812         offset = append(new Constant(new LongConstant(dest_off + null_marker_offset)));
1813         append(new UnsafePut(T_BOOLEAN, dest, offset, nm, false));
1814       }
1815     } else {
1816       Value value = append(new LoadField(src, src_off + offset, field, false, state_before, false));
1817       StoreField* store = new StoreField(dest, dest_off + offset, field, value, false, state_before, false);
1818       store->set_enclosing_field(enclosing_field);
1819       append(store);
1820     }
1821   }
1822 }
1823 
1824 void GraphBuilder::access_field(Bytecodes::Code code) {
1825   bool will_link;
1826   ciField* field = stream()->get_field(will_link);
1827   ciInstanceKlass* holder = field->holder();
1828   BasicType field_type = field->type()->basic_type();
1829   ValueType* type = as_ValueType(field_type);
1830 
1831   // call will_link again to determine if the field is valid.
1832   const bool needs_patching = !holder->is_loaded() ||
1833                               !field->will_link(method(), code) ||
1834                               (!field->is_flat() && PatchALot);
1835 
1836   ValueStack* state_before = nullptr;
1837   if (!holder->is_initialized() || needs_patching) {
1838     // save state before instruction for debug info when
1839     // deoptimization happens during patching
1840     state_before = copy_state_before();
1841   }
1842 
1843   Value obj = nullptr;
1844   if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
1845     if (state_before != nullptr) {
1846       // build a patching constant
1847       obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
1848     } else {
1849       obj = new Constant(new InstanceConstant(holder->java_mirror()));
1850     }
1851   }
1852 
1853   if (code == Bytecodes::_putfield) {
1854     scope()->set_wrote_fields();
1855     if (field->is_volatile()) {
1856       scope()->set_wrote_volatile();
1857     }
1858     if (field->is_final()) {
1859       scope()->set_wrote_final();
1860     }
1861     if (field->is_stable()) {
1862       scope()->set_wrote_stable();
1863     }
1864   }
1865 
1866   int offset = !needs_patching ? field->offset_in_bytes() : -1;
1867   switch (code) {
1868     case Bytecodes::_getstatic: {
1869       // check for compile-time constants, i.e., initialized static final fields
1870       Value constant = nullptr;
1871       if (field->is_static_constant() && !PatchALot) {
1872         ciConstant field_value = field->constant_value();
1873         assert(!field->is_stable() || !field_value.is_null_or_zero(),
1874                "stable static w/ default value shouldn't be a constant");
1875         constant = make_constant(field_value, field);
1876       }
1877       if (constant != nullptr) {
1878         push(type, append(constant));
1879       } else {
1880         if (state_before == nullptr) {
1881           state_before = copy_state_for_exception();
1882         }
1883         LoadField* load_field = new LoadField(append(obj), offset, field, true,
1884                                         state_before, needs_patching);
1885         push(type, append(load_field));
1886       }
1887       break;
1888     }
1889     case Bytecodes::_putstatic: {
1890       Value val = pop(type);
1891       if (state_before == nullptr) {
1892         state_before = copy_state_for_exception();
1893       }
1894       if (field_type == T_BOOLEAN) {
1895         Value mask = append(new Constant(new IntConstant(1)));
1896         val = append(new LogicOp(Bytecodes::_iand, val, mask));
1897       }
1898       if (field->is_null_free()) {
1899         null_check(val);
1900       }
1901       if (field->is_null_free() && field->type()->is_loaded() && field->type()->as_inline_klass()->is_empty() && (!method()->is_class_initializer() || field->is_flat())) {
1902         // Storing to a field of an empty, null-free inline type that is already initialized. Ignore.
1903         break;
1904       }
1905       append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
1906       break;
1907     }
1908     case Bytecodes::_getfield: {
1909       // Check for compile-time constants, i.e., trusted final non-static fields.
1910       Value constant = nullptr;
1911       if (state_before == nullptr && field->is_flat()) {
1912         // Save the entire state and re-execute on deopt when accessing flat fields
1913         assert(Interpreter::bytecode_should_reexecute(code), "should reexecute");
1914         state_before = copy_state_before();
1915       }
1916       if (!has_pending_field_access() && !has_pending_load_indexed()) {
1917         obj = apop();
1918         ObjectType* obj_type = obj->type()->as_ObjectType();
1919         if (field->is_constant() && !field->is_flat() && obj_type->is_constant() && !PatchALot) {
1920           ciObject* const_oop = obj_type->constant_value();
1921           if (!const_oop->is_null_object() && const_oop->is_loaded()) {
1922             ciConstant field_value = field->constant_value_of(const_oop);
1923             if (field_value.is_valid()) {
1924               constant = make_constant(field_value, field);
1925               // For CallSite objects add a dependency for invalidation of the optimization.
1926               if (field->is_call_site_target()) {
1927                 ciCallSite* call_site = const_oop->as_call_site();
1928                 if (!call_site->is_fully_initialized_constant_call_site()) {
1929                   ciMethodHandle* target = field_value.as_object()->as_method_handle();
1930                   dependency_recorder()->assert_call_site_target_value(call_site, target);
1931                 }
1932               }
1933             }
1934           }
1935         }
1936       }
1937       if (constant != nullptr) {
1938         push(type, append(constant));
1939       } else {
1940         if (state_before == nullptr) {
1941           state_before = copy_state_for_exception();
1942         }
1943         if (!field->is_flat()) {
1944           if (has_pending_field_access()) {
1945             assert(!needs_patching, "Can't patch delayed field access");
1946             obj = pending_field_access()->obj();
1947             offset += pending_field_access()->offset() - field->holder()->as_inline_klass()->payload_offset();
1948             field = pending_field_access()->holder()->get_field_by_offset(offset, false);
1949             assert(field != nullptr, "field not found");
1950             set_pending_field_access(nullptr);
1951           } else if (has_pending_load_indexed()) {
1952             assert(!needs_patching, "Can't patch delayed field access");
1953             pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
1954             LoadIndexed* li = pending_load_indexed()->load_instr();
1955             li->set_type(type);
1956             push(type, append(li));
1957             set_pending_load_indexed(nullptr);




1958             break;
1959           }
1960           LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
1961           Value replacement = !needs_patching ? _memory->load(load) : load;
1962           if (replacement != load) {
1963             assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
1964             // Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing
1965             // conversion. Emit an explicit conversion here to get the correct field value after the write.
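                 // e.g., an int 0x1FF previously stored to a byte field must read
                 // back as -1, hence the i2b narrowing of the forwarded value.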
1966             switch (field_type) {
1967             case T_BOOLEAN:
1968             case T_BYTE:
1969               replacement = append(new Convert(Bytecodes::_i2b, replacement, type));
1970               break;
1971             case T_CHAR:
1972               replacement = append(new Convert(Bytecodes::_i2c, replacement, type));
1973               break;
1974             case T_SHORT:
1975               replacement = append(new Convert(Bytecodes::_i2s, replacement, type));
1976               break;
1977             default:
1978               break;
1979             }
1980             push(type, replacement);
1981           } else {
1982             push(type, append(load));
1983           }
1984         } else {
1985           // Flat field
1986           assert(!needs_patching, "Can't patch flat inline type field access");
1987           ciInlineKlass* inline_klass = field->type()->as_inline_klass();
1988           if (field->is_atomic()) {
1989             assert(!has_pending_field_access(), "Pending field accesses are not supported");
1990             LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
1991             push(type, append(load));
1992           } else {
1993             // Look at the next bytecode to check if we can delay the field access
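                 // e.g., in a chain like `holder.f1.f2` where both are null-free flat
                 // fields, the load of f1 can be folded into the following load of f2.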
1994             bool can_delay_access = false;
1995             if (field->is_null_free()) {
1996               ciBytecodeStream s(method());
1997               s.force_bci(bci());
1998               s.next();
1999               if (s.cur_bc() == Bytecodes::_getfield && !needs_patching) {
2000                 ciField* next_field = s.get_field(will_link);
2001                 bool next_needs_patching = !next_field->holder()->is_loaded() ||
2002                                           !next_field->will_link(method(), Bytecodes::_getfield) ||
2003                                           PatchALot;
2004                 // We can't update the offset for atomic accesses
2005                 bool next_needs_atomic_access = next_field->is_flat() && next_field->is_atomic();
2006                 can_delay_access = C1UseDelayedFlattenedFieldReads && !next_needs_patching && !next_needs_atomic_access && next_field->is_null_free();
2007               }
2008             }
2009 
2010             if (can_delay_access) {
2011               if (has_pending_load_indexed()) {
2012                 pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
2013               } else if (has_pending_field_access()) {
2014                 pending_field_access()->inc_offset(offset - field->holder()->as_inline_klass()->payload_offset());
2015               } else {
2016                 null_check(obj);
2017                 DelayedFieldAccess* dfa = new DelayedFieldAccess(obj, field->holder(), field->offset_in_bytes(), state_before);
2018                 set_pending_field_access(dfa);
2019               }
2020             } else {
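                   // Materializing the flat value writes its contents into a freshly
                   // allocated buffer (copy_inline_content), so record the field writes.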
2021               scope()->set_wrote_final();
2022               scope()->set_wrote_fields();
2023               if (has_pending_load_indexed()) {
2024                 assert(field->is_null_free(), "nullable fields do not support delayed accesses yet");
2025                 assert(!needs_patching, "Can't patch delayed field access");
2026                 pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
2027                 NewInstance* vt = new NewInstance(inline_klass, pending_load_indexed()->state_before(), false, true);
2028                 _memory->new_instance(vt);
2029                 pending_load_indexed()->load_instr()->set_vt(vt);
2030                 apush(append_split(vt));
2031                 append(pending_load_indexed()->load_instr());
2032                 set_pending_load_indexed(nullptr);
2033               } else if (has_pending_field_access()) {
2034                 assert(field->is_null_free(), "nullable fields do not support delayed accesses yet");
2035                 state_before = pending_field_access()->state_before();
2036                 NewInstance* new_instance = new NewInstance(inline_klass, state_before, false, true);
2037                 _memory->new_instance(new_instance);
2038                 apush(append_split(new_instance));
2039                 copy_inline_content(inline_klass, pending_field_access()->obj(),
2040                                     pending_field_access()->offset() + field->offset_in_bytes() - field->holder()->as_inline_klass()->payload_offset(),
2041                                     new_instance, inline_klass->payload_offset(), state_before);
2042                 set_pending_field_access(nullptr);
2043               } else {
2044                 if (!field->is_null_free() && !inline_klass->is_initialized()) {
2045                   // Cannot allocate an instance of inline_klass because it may not have
2046                   // been initialized; bail out for now.
2047                   bailout("load from an uninitialized nullable non-atomic flat field");
2048                   return;
2049                 }
2050 
2051                 NewInstance* new_instance = new NewInstance(inline_klass, state_before, false, true);
2052                 _memory->new_instance(new_instance);
2053                 append_split(new_instance);
2054 
2055                 if (inline_klass->is_initialized() && inline_klass->is_empty()) {
2056                   // Needs an explicit null check because the code below does not perform any actual load when there are no fields
2057                   null_check(obj);
2058                 }
2059                 copy_inline_content(inline_klass, obj, field->offset_in_bytes(), new_instance, inline_klass->payload_offset(), state_before);
2060 
2061                 Instruction* result = new_instance;
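                     // Nullable flat field: read the null-marker byte from the payload;
                     // a zero marker means the logical value of the field is null.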
2062                 if (!field->is_null_free()) {
2063                   Value int_zero = append(new Constant(intZero));
2064                   Value object_null = append(new Constant(objectNull));
2065                   Value nm_offset = append(new Constant(new LongConstant(offset + inline_klass->null_marker_offset_in_payload())));
2066                   Value nm = append(new UnsafeGet(T_BOOLEAN, obj, nm_offset, false));
2067                   result = append(new IfOp(nm, Instruction::neq, int_zero, new_instance, object_null, state_before, false));
2068                 }
2069                 apush(result);
2070               }
2071 
2072               // If we allocated a new instance, ensure the stores that copy the
2073               // field contents are visible before any subsequent store that
2074               // publishes this reference.
2075               append(new MemBar(lir_membar_storestore));
2076             }
2077           }
2078         }
2079       }
2080       break;
2081     }
2082     case Bytecodes::_putfield: {
2083       Value val = pop(type);
2084       obj = apop();
2085       if (state_before == nullptr) {
2086         state_before = copy_state_for_exception();
2087       }
2088       if (field_type == T_BOOLEAN) {
2089         Value mask = append(new Constant(new IntConstant(1)));
2090         val = append(new LogicOp(Bytecodes::_iand, val, mask));
2091       }
2092 
2093       if (field->is_null_free() && field->type()->is_loaded() && field->type()->as_inline_klass()->is_empty() && (!method()->is_object_constructor() || field->is_flat())) {
2094         // Storing to a field of an empty, null-free inline type that is already initialized: drop the store itself, but keep the null checks.
2095         null_check(obj);
2096         null_check(val);
2097       } else if (!field->is_flat()) {
2098         if (field->is_null_free()) {
2099           null_check(val);
2100         }
2101         StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching);
2102         if (!needs_patching) store = _memory->store(store);
2103         if (store != nullptr) {
2104           append(store);
2105         }
2106       } else {
2107         // Flat field
2108         assert(!needs_patching, "Can't patch flat inline type field access");
2109         ciInlineKlass* inline_klass = field->type()->as_inline_klass();
2110         if (field->is_atomic()) {
2111           if (field->is_null_free()) {
2112             null_check(val);
2113           }
2114           append(new StoreField(obj, offset, field, val, false, state_before, needs_patching));
2115         } else if (field->is_null_free()) {
2116           assert(!inline_klass->is_empty(), "should have been handled");
2117           copy_inline_content(inline_klass, val, inline_klass->payload_offset(), obj, offset, state_before, field);
2118         } else {
2119           if (!inline_klass->is_initialized()) {
2120             // null_reset_value is not available, bailout for now
2121             bailout("store to an uninitialized nullable non-atomic flat field");
2122             return;
2123           }
2124 
2125           // Store the subfields when the field is a nullable non-atomic flat field
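               // If val is null there is nothing to copy from, so the payload is
               // taken from the klass' canonical null_reset_value instead.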
2126           Value object_null = append(new Constant(objectNull));
2127           Value null_reset_value = append(new Constant(new ObjectConstant(inline_klass->get_null_reset_value().as_object())));
2128           Value src = append(new IfOp(val, Instruction::neq, object_null, val, null_reset_value, state_before, false));
2129           copy_inline_content(inline_klass, src, inline_klass->payload_offset(), obj, offset, state_before);
2130 
2131           // Store the null marker
2132           Value int_one = append(new Constant(new IntConstant(1)));
2133           Value int_zero = append(new Constant(intZero));
2134           Value nm = append(new IfOp(val, Instruction::neq, object_null, int_one, int_zero, state_before, false));
2135           Value nm_offset = append(new Constant(new LongConstant(offset + inline_klass->null_marker_offset_in_payload())));
2136           append(new UnsafePut(T_BOOLEAN, obj, nm_offset, nm, false));
2137         }
2138       }
2139       break;
2140     }
2141     default:
2142       ShouldNotReachHere();
2143       break;
2144   }
2145 }
2146 

2147 Dependencies* GraphBuilder::dependency_recorder() const {
2148   return compilation()->dependency_recorder();
2149 }
2150 
2151 // How many arguments do we want to profile?
2152 Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) {
2153   int n = 0;
2154   bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
2155   start = has_receiver ? 1 : 0;
2156   if (profile_arguments()) {
2157     ciProfileData* data = method()->method_data()->bci_to_data(bci());
2158     if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
2159       n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
2160     }
2161   }
2162   // If we are inlining, we need to collect arguments to profile parameters for the target
2163   if (profile_parameters() && target != nullptr) {
2164     if (target->method_data() != nullptr && target->method_data()->parameters_type_data() != nullptr) {
2165       // The receiver is profiled on method entry so it's included in
2166       // the number of parameters but here we're only interested in

2242       break;
2243     case Bytecodes::_invokehandle:
2244       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
2245       break;
2246     default:
2247       break;
2248     }
2249   } else {
2250     if (bc_raw == Bytecodes::_invokehandle) {
2251       assert(!will_link, "should come here only for unlinked call");
2252       code = Bytecodes::_invokespecial;
2253     }
2254   }
2255 
2256   if (code == Bytecodes::_invokespecial) {
2257     // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
2258     ciKlass* receiver_constraint = nullptr;
2259 
2260     if (bc_raw == Bytecodes::_invokeinterface) {
2261       receiver_constraint = holder;
2262     } else if (bc_raw == Bytecodes::_invokespecial && !target->is_object_constructor() && calling_klass->is_interface()) {
2263       receiver_constraint = calling_klass;
2264     }
2265 
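         // e.g., `Iface.super.m()` from an interface, or a private interface method
         // invoked via invokeinterface, requires a receiver subtype check before
         // the non-virtual dispatch.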
2266     if (receiver_constraint != nullptr) {
2267       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
2268       Value receiver = state()->stack_at(index);
2269       CheckCast* c = new CheckCast(receiver_constraint, receiver, copy_state_before());
2270       // go to uncommon_trap when checkcast fails
2271       c->set_invokespecial_receiver_check();
2272       state()->stack_at_put(index, append_split(c));
2273     }
2274   }
2275 
2276   // Push appendix argument (MethodType, CallSite, etc.), if one is present.
2277   bool patch_for_appendix = false;
2278   int patching_appendix_arg = 0;
2279   if (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot)) {
2280     Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
2281     apush(arg);
2282     patch_for_appendix = true;

2432       }
2433     } else {
2434       print_inlining(target, "no static binding", /*success*/ false);
2435     }
2436   } else {
2437     print_inlining(target, "not inlineable", /*success*/ false);
2438   }
2439 
2440   // If we attempted an inline which did not succeed because of a
2441   // bailout during construction of the callee graph, the entire
2442   // compilation has to be aborted. This is fairly rare and currently
2443   // seems to only occur for jasm-generated classes which contain
2444   // jsr/ret pairs which are not associated with finally clauses and
2445   // do not have exception handlers in the containing method, and are
2446   // therefore not caught early enough to abort the inlining without
2447   // corrupting the graph. (We currently bail out with a non-empty
2448   // stack at a ret in these situations.)
2449   CHECK_BAILOUT();
2450 
2451   // inlining not successful => standard invoke
2452   ciType* return_type = declared_signature->return_type();
2453   ValueStack* state_before = copy_state_exhandling();
2454 
2455   // The bytecode (code) might change in this method, so we check it very late.
2456   const bool has_receiver =
2457     code == Bytecodes::_invokespecial   ||
2458     code == Bytecodes::_invokevirtual   ||
2459     code == Bytecodes::_invokeinterface;
2460   Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
2461   Value recv = has_receiver ? apop() : nullptr;
2462 
2463   // A null check is required here (when there is a receiver) for any of the following cases:
2464   // - invokespecial: always needs a null check.
2465   // - invokevirtual, when the target is final and loaded. Calls to final targets become optimized
2466   //   and require null checking. If the target is loaded, a null check is emitted here.
2467   //   If the target isn't loaded, the null check must happen after call resolution. We achieve that
2468   //   by using the target method's unverified entry point (see CompiledIC::compute_monomorphic_entry).
2469   //   (The JVM specification requires that LinkageError be thrown before an NPE. Resolution of an
2470   //   unloaded target may fail, so the null check can't precede resolution.)
2471   // - A call that will be profiled. (But we can't add a null check when the target is unloaded, for the
2472   //   same reason as above, so calls with a receiver to unloaded targets can't be profiled.)

2481       null_check(recv);
2482     }
2483 
2484     if (is_profiling()) {
2485       // Note that we'd collect profile data in this method if we wanted it.
2486       compilation()->set_would_profile(true);
2487 
2488       if (profile_calls()) {
2489         assert(cha_monomorphic_target == nullptr || exact_target == nullptr, "both cannot be set");
2490         ciKlass* target_klass = nullptr;
2491         if (cha_monomorphic_target != nullptr) {
2492           target_klass = cha_monomorphic_target->holder();
2493         } else if (exact_target != nullptr) {
2494           target_klass = exact_target->holder();
2495         }
2496         profile_call(target, recv, target_klass, collect_args_for_profiling(args, nullptr, false), false);
2497       }
2498     }
2499   }
2500 
2501   Invoke* result = new Invoke(code, return_type, recv, args, target, state_before);
2502   // push result
2503   append_split(result);
2504 
2505   if (!return_type->is_void()) {
2506     push(as_ValueType(return_type), result);
2507   }
2508 
2509   if (profile_return() && return_type->is_object()) {
2510     profile_return_type(result, target);
2511   }
2512 }
2513 
2514 
2515 void GraphBuilder::new_instance(int klass_index) {
2516   ValueStack* state_before = copy_state_exhandling();
2517   ciKlass* klass = stream()->get_klass();
2518   assert(klass->is_instance_klass(), "must be an instance klass");
2519   NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass(), false);
2520   _memory->new_instance(new_instance);
2521   apush(append_split(new_instance));
2522 }
2523 

2524 void GraphBuilder::new_type_array() {
2525   ValueStack* state_before = copy_state_exhandling();
2526   apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true)));
2527 }
2528 
2529 
2530 void GraphBuilder::new_object_array() {
2531   ciKlass* klass = stream()->get_klass();
2532   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2533   NewArray* n = new NewObjectArray(klass, ipop(), state_before);
2534   apush(append_split(n));
2535 }
2536 
2537 
2538 bool GraphBuilder::direct_compare(ciKlass* k) {
2539   if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
2540     ciInstanceKlass* ik = k->as_instance_klass();
2541     if (ik->is_final()) {
2542       return true;
2543     } else {

2576   ciKlass* klass = stream()->get_klass();
2577   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2578   InstanceOf* i = new InstanceOf(klass, apop(), state_before);
2579   ipush(append_split(i));
2580   i->set_direct_compare(direct_compare(klass));
2581 
2582   if (is_profiling()) {
2583     // Note that we'd collect profile data in this method if we wanted it.
2584     compilation()->set_would_profile(true);
2585 
2586     if (profile_checkcasts()) {
2587       i->set_profiled_method(method());
2588       i->set_profiled_bci(bci());
2589       i->set_should_profile(true);
2590     }
2591   }
2592 }
2593 
2594 
2595 void GraphBuilder::monitorenter(Value x, int bci) {
2596   bool maybe_inlinetype = false;
2597   if (bci == InvocationEntryBci) {
2598     // Called by GraphBuilder::inline_sync_entry.
2599 #ifdef ASSERT
2600     ciType* obj_type = x->declared_type();
2601     assert(obj_type == nullptr || !obj_type->is_inlinetype(), "inline types cannot have synchronized methods");
2602 #endif
2603   } else {
2604     // We are compiling a monitorenter bytecode
2605     if (Arguments::is_valhalla_enabled()) {
2606       ciType* obj_type = x->declared_type();
2607       if (obj_type == nullptr || obj_type->as_klass()->can_be_inline_klass()) {
2608         // If we're (possibly) locking on an inline type, check for markWord::always_locked_pattern
2609         // and throw IMSE. (obj_type is null for Phi nodes, so let's just be conservative).
2610         maybe_inlinetype = true;
2611       }
2612     }
2613   }
2614 
2615   // save state before locking in case of deoptimization after a NullPointerException
2616   ValueStack* state_before = copy_state_for_exception_with_bci(bci);
2617   append_with_bci(new MonitorEnter(x, state()->lock(x), state_before, maybe_inlinetype), bci);
2618   kill_all();
2619 }
2620 
2621 
2622 void GraphBuilder::monitorexit(Value x, int bci) {
2623   append_with_bci(new MonitorExit(x, state()->unlock()), bci);
2624   kill_all();
2625 }
2626 
2627 
2628 void GraphBuilder::new_multi_array(int dimensions) {
2629   ciKlass* klass = stream()->get_klass();
2630   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2631 
2632   Values* dims = new Values(dimensions, dimensions, nullptr);
2633   // fill in all dimensions
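       // (popped in reverse order: the count of the last dimension is on top of the stack)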
2634   int i = dimensions;
2635   while (i-- > 0) dims->at_put(i, ipop());
2636   // create array
2637   NewArray* n = new NewMultiArray(klass, dims, state_before);

2722 
2723 Instruction* GraphBuilder::append_split(StateSplit* instr) {
2724   return append_with_bci(instr, bci());
2725 }
2726 
2727 
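     // The check is elided when the value is provably non-null: a fresh
     // allocation, a loaded non-null constant, or a null-free inline type value.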
2728 void GraphBuilder::null_check(Value value) {
2729   if (value->as_NewArray() != nullptr || value->as_NewInstance() != nullptr) {
2730     return;
2731   } else {
2732     Constant* con = value->as_Constant();
2733     if (con) {
2734       ObjectType* c = con->type()->as_ObjectType();
2735       if (c && c->is_loaded()) {
2736         ObjectConstant* oc = c->as_ObjectConstant();
2737         if (!oc || !oc->value()->is_null_object()) {
2738           return;
2739         }
2740       }
2741     }
2742     if (value->is_null_free()) return;
2743   }
2744   append(new NullCheck(value, copy_state_for_exception()));
2745 }
2746 
2747 
2748 
2749 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
2750   if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != nullptr)) {
2751     assert(instruction->exception_state() == nullptr
2752            || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
2753            || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()),
2754            "exception_state should be of exception kind");
2755     return new XHandlers();
2756   }
2757 
2758   XHandlers*  exception_handlers = new XHandlers();
2759   ScopeData*  cur_scope_data = scope_data();
2760   ValueStack* cur_state = instruction->state_before();
2761   ValueStack* prev_state = nullptr;
2762   int scope_count = 0;
2763 
2764   assert(cur_state != nullptr, "state_before must be set");
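       // Walk the scope chain outward, joining every handler whose range covers
       // the current bci in each (possibly inlined) scope.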
2765   do {
2766     int cur_bci = cur_state->bci();
2767     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
2768     assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci()
2769            || has_pending_field_access() || has_pending_load_indexed(), "invalid bci");
2770 
2771 
2772     // join with all potential exception handlers
2773     XHandlers* list = cur_scope_data->xhandlers();
2774     const int n = list->length();
2775     for (int i = 0; i < n; i++) {
2776       XHandler* h = list->handler_at(i);
2777       if (h->covers(cur_bci)) {
2778         // h is a potential exception handler => join it
2779         compilation()->set_has_exception_handlers(true);
2780 
2781         BlockBegin* entry = h->entry_block();
2782         if (entry == block()) {
2783           // It's acceptable for an exception handler to cover itself
2784           // but we don't handle that in the parser currently. It's
2785           // very rare, so we bail out instead of trying to handle it.
2786           BAILOUT_("exception handler covers itself", exception_handlers);
2787         }
2788         assert(entry->bci() == h->handler_bci(), "must match");
2789         assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
2790 

3564     state->store_local(idx, new Local(type, vt, idx, false));
3565     idx += type->size();
3566   }
3567 
3568   // lock synchronized method
3569   if (method()->is_synchronized()) {
3570     state->lock(nullptr);
3571   }
3572 
3573   return state;
3574 }
3575 
3576 
3577 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
3578   : _scope_data(nullptr)
3579   , _compilation(compilation)
3580   , _memory(new MemoryBuffer())
3581   , _inline_bailout_msg(nullptr)
3582   , _instruction_count(0)
3583   , _osr_entry(nullptr)
3584   , _pending_field_access(nullptr)
3585   , _pending_load_indexed(nullptr)
3586 {
3587   int osr_bci = compilation->osr_bci();
3588 
3589   // determine entry points and bci2block mapping
3590   BlockListBuilder blm(compilation, scope, osr_bci);
3591   CHECK_BAILOUT();
3592 
3593   BlockList* bci2block = blm.bci2block();
3594   BlockBegin* start_block = bci2block->at(0);
3595 
3596   push_root_scope(scope, bci2block, start_block);
3597 
3598   // set up state for std entry
3599   _initial_state = state_at_entry();
3600   start_block->merge(_initial_state, compilation->has_irreducible_loops());
3601 
3602   // End nulls still exist here
3603 
3604   // complete graph
3605   _vmap        = new ValueMap();

4318   // Temporarily set up bytecode stream so we can append instructions
4319   // (only using the bci of this stream)
4320   scope_data()->set_stream(scope_data()->parent()->stream());
4321 
4322   // Pass parameters into callee state: add assignments
4323   // note: this will also ensure that all arguments are computed before being passed
4324   ValueStack* callee_state = state();
4325   ValueStack* caller_state = state()->caller_state();
4326   for (int i = args_base; i < caller_state->stack_size(); ) {
4327     const int arg_no = i - args_base;
4328     Value arg = caller_state->stack_at_inc(i);
4329     store_local(callee_state, arg, arg_no);
4330   }
4331 
4332   // Remove args from stack.
4333   // Note that we preserve locals state in case we can use it later
4334   // (see use of pop_scope() below)
4335   caller_state->truncate_stack(args_base);
4336   assert(callee_state->stack_size() == 0, "callee stack must be empty");
4337 
4338   // Check if we need a membar at the beginning of the java.lang.Object
4339   // constructor to satisfy the memory model for strict fields.
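       // e.g., a strict field assigned before the implicit super() call must be
       // visible before the newly constructed object can be published.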
4340   if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
4341     Value receiver = state()->local_at(0);
4342     ciType* klass = receiver->exact_type();
4343     if (klass == nullptr) {
4344       // No exact type, check if the declared type has no implementors and add a dependency
4345       klass = receiver->declared_type();
4346       klass = compilation()->cha_exact_type(klass);
4347     }
4348     if (klass != nullptr && klass->is_instance_klass()) {
4349       // Exact receiver type, check if there is a strict field
4350       ciInstanceKlass* holder = klass->as_instance_klass();
4351       for (int i = 0; i < holder->nof_nonstatic_fields(); i++) {
4352         ciField* field = holder->nonstatic_field_at(i);
4353         if (field->is_strict()) {
4354           // Found a strict field, a membar is needed
4355           append(new MemBar(lir_membar_storestore));
4356           break;
4357         }
4358       }
4359     } else if (klass == nullptr) {
4360       // We can't statically determine the type of the receiver and therefore need
4361       // to put a membar here because it could have a strict field.
4362       append(new MemBar(lir_membar_storestore));
4363     }
4364   }
4365 
4366   Value lock = nullptr;
4367   BlockBegin* sync_handler = nullptr;
4368 
4369   // Inline the locking of the receiver if the callee is synchronized
4370   if (callee->is_synchronized()) {
4371     lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
4372                                : state()->local_at(0);
4373     sync_handler = new BlockBegin(SynchronizationEntryBCI);
4374     inline_sync_entry(lock, sync_handler);
4375   }
4376 
4377   if (compilation()->env()->dtrace_method_probes()) {
4378     Values* args = new Values(1);
4379     args->push(append(new Constant(new MethodConstant(method()))));
4380     append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
4381   }
4382 
4383   if (profile_inlined_calls()) {
4384     profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI));
4385   }