src/hotspot/share/c1/c1_GraphBuilder.cpp

   1 /*
   2  * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "c1/c1_Canonicalizer.hpp"
  26 #include "c1/c1_CFGPrinter.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_GraphBuilder.hpp"

  29 #include "c1/c1_InstructionPrinter.hpp"

  30 #include "ci/ciCallSite.hpp"
  31 #include "ci/ciField.hpp"
  32 #include "ci/ciKlass.hpp"
  33 #include "ci/ciMemberName.hpp"
  34 #include "ci/ciSymbols.hpp"
  35 #include "ci/ciUtilities.inline.hpp"
  36 #include "classfile/javaClasses.hpp"
  37 #include "compiler/compilationPolicy.hpp"
  38 #include "compiler/compileBroker.hpp"
  39 #include "compiler/compilerEvent.hpp"
  40 #include "interpreter/bytecode.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "memory/resourceArea.hpp"

  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/checkedCast.hpp"
  45 #include "utilities/macros.hpp"
  46 #if INCLUDE_JFR
  47 #include "jfr/jfr.hpp"
  48 #endif
  49 
  50 class BlockListBuilder {
  51  private:
  52   Compilation* _compilation;
  53   IRScope*     _scope;
  54 
  55   BlockList    _blocks;                // internal list of all blocks
  56   BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder
  57   GrowableArray<BlockList> _bci2block_successors; // Mapping from bcis to their blocks' successors while we don't have a BlockEnd
  58 
  59   // fields used by mark_loops
  60   ResourceBitMap _active;              // for iteration of control flow graph
  61   ResourceBitMap _visited;             // for iteration of control flow graph
  62   GrowableArray<ResourceBitMap> _loop_map; // caches whether a block is contained in a loop

1030       // they are using this local. We don't handle skipping over a
1031       // ret.
1032       for (ScopeData* cur_scope_data = scope_data()->parent();
1033            cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
1034            cur_scope_data = cur_scope_data->parent()) {
1035         if (cur_scope_data->jsr_return_address_local() == index) {
1036           BAILOUT("subroutine overwrites return address from previous subroutine");
1037         }
1038       }
1039     } else if (index == scope_data()->jsr_return_address_local()) {
1040       scope_data()->set_jsr_return_address_local(-1);
1041     }
1042   }
1043 
1044   state->store_local(index, x);
1045 }
1046 
1047 
1048 void GraphBuilder::load_indexed(BasicType type) {
1049   // In case of in-block code motion during range check elimination
1050   ValueStack* state_before = copy_state_indexed_access();
1051   compilation()->set_has_access_indexed(true);
1052   Value index = ipop();
1053   Value array = apop();
1054   Value length = nullptr;
1055   if (CSEArrayLength ||
1056       (array->as_Constant() != nullptr) ||
1057       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
1058       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
1059       (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
1060     length = append(new ArrayLength(array, state_before));
1061   }
1062   push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before)));
1063 }
1064 
1065 
1066 void GraphBuilder::store_indexed(BasicType type) {
1067   // In case of in-block code motion during range check elimination
1068   ValueStack* state_before = copy_state_indexed_access();
1069   compilation()->set_has_access_indexed(true);
1070   Value value = pop(as_ValueType(type));
1071   Value index = ipop();
1072   Value array = apop();
1073   Value length = nullptr;
1074   if (CSEArrayLength ||
1075       (array->as_Constant() != nullptr) ||
1076       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
1077       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
1078       (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
1079     length = append(new ArrayLength(array, state_before));
1080   }
1081   ciType* array_type = array->declared_type();
1082   bool check_boolean = false;
1083   if (array_type != nullptr) {
1084     if (array_type->is_loaded() &&
1085       array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) {
1086       assert(type == T_BYTE, "boolean store uses bastore");
1087       Value mask = append(new Constant(new IntConstant(1)));
1088       value = append(new LogicOp(Bytecodes::_iand, value, mask));
1089     }
1090   } else if (type == T_BYTE) {
1091     check_boolean = true;
1092   }
1093   StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before, check_boolean);
1094   append(result);
1095   _memory->store_value(value);
1096 
1097   if (type == T_OBJECT && is_profiling()) {
1098     // Note that we'd collect profile data in this method if we wanted it.
1099     compilation()->set_would_profile(true);
1100 
1101     if (profile_checkcasts()) {
1102       result->set_profiled_method(method());
1103       result->set_profiled_bci(bci());
1104       result->set_should_profile(true);
1105     }
1106   }
1107 }
1108 
1109 
1110 void GraphBuilder::stack_op(Bytecodes::Code code) {
1111   switch (code) {
1112     case Bytecodes::_pop:
1113       { state()->raw_pop();
1114       }
1115       break;
1116     case Bytecodes::_pop2:
1117       { state()->raw_pop();
1118         state()->raw_pop();
1119       }
1120       break;
1121     case Bytecodes::_dup:
1122       { Value w = state()->raw_pop();
1123         state()->raw_push(w);
1124         state()->raw_push(w);
1125       }
1126       break;
1127     case Bytecodes::_dup_x1:
1128       { Value w1 = state()->raw_pop();
1129         Value w2 = state()->raw_pop();

1274 
1275 
1276 void GraphBuilder::_goto(int from_bci, int to_bci) {
1277   Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
1278   if (is_profiling()) {
1279     compilation()->set_would_profile(true);
1280     x->set_profiled_bci(bci());
1281     if (profile_branches()) {
1282       x->set_profiled_method(method());
1283       x->set_should_profile(true);
1284     }
1285   }
1286   append(x);
1287 }
1288 
1289 
1290 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
1291   BlockBegin* tsux = block_at(stream()->get_dest());
1292   BlockBegin* fsux = block_at(stream()->next_bci());
1293   bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
1294   // In case of loop invariant code motion or predicate insertion
1295   // before the body of a loop, the state is needed
1296   Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : nullptr, is_bb));
1297 
1298   assert(i->as_Goto() == nullptr ||
1299          (i->as_Goto()->sux_at(0) == tsux  && i->as_Goto()->is_safepoint() == (tsux->bci() < stream()->cur_bci())) ||
1300          (i->as_Goto()->sux_at(0) == fsux  && i->as_Goto()->is_safepoint() == (fsux->bci() < stream()->cur_bci())),
1301          "safepoint state of Goto returned by canonicalizer incorrect");
1302 
1303   if (is_profiling()) {
1304     If* if_node = i->as_If();
1305     if (if_node != nullptr) {
1306       // Note that we'd collect profile data in this method if we wanted it.
1307       compilation()->set_would_profile(true);
1308       // At level 2 we need the proper bci to count backedges
1309       if_node->set_profiled_bci(bci());
1310       if (profile_branches()) {
1311         // Successors can be rotated by the canonicalizer, check for this case.
1312         if_node->set_profiled_method(method());
1313         if_node->set_should_profile(true);
1314         if (if_node->tsux() == fsux) {
1315           if_node->set_swapped(true);
1316         }

1531   }
1532 
1533   if (needs_check) {
1534     // Perform the registration of finalizable objects.
1535     ValueStack* state_before = copy_state_for_exception();
1536     load_local(objectType, 0);
1537     append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
1538                                state()->pop_arguments(1),
1539                                true, state_before, true));
1540   }
1541 }
1542 
1543 
1544 void GraphBuilder::method_return(Value x, bool ignore_return) {
1545   if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1546     call_register_finalizer();
1547   }
1548 
1549   // The conditions for a memory barrier are described in Parse::do_exits().
1550   bool need_mem_bar = false;
1551   if (method()->name() == ciSymbols::object_initializer_name() &&
1552        (scope()->wrote_final() || scope()->wrote_stable() ||
1553          (AlwaysSafeConstructors && scope()->wrote_fields()) ||
1554          (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) {
1555     need_mem_bar = true;
1556   }
1557 
1558   BasicType bt = method()->return_type()->basic_type();
1559   switch (bt) {
1560     case T_BYTE:
1561     {
1562       Value shift = append(new Constant(new IntConstant(24)));
1563       x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
1564       x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
1565       break;
1566     }
1567     case T_SHORT:
1568     {
1569       Value shift = append(new Constant(new IntConstant(16)));
1570       x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
1571       x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
1572       break;

1682   // Attach dimension info to stable arrays.
1683   if (FoldStableValues &&
1684       field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) {
1685     ciArray* array = field_value.as_object()->as_array();
1686     jint dimension = field->type()->as_array_klass()->dimension();
1687     value = new StableArrayConstant(array, dimension);
1688   }
1689 
1690   switch (field_type) {
1691     case T_ARRAY:
1692     case T_OBJECT:
1693       if (field_value.as_object()->should_be_constant()) {
1694         return new Constant(value);
1695       }
1696       return nullptr; // Not a constant.
1697     default:
1698       return new Constant(value);
1699   }
1700 }
1701 
1702 void GraphBuilder::access_field(Bytecodes::Code code) {
1703   bool will_link;
1704   ciField* field = stream()->get_field(will_link);
1705   ciInstanceKlass* holder = field->holder();
1706   BasicType field_type = field->type()->basic_type();
1707   ValueType* type = as_ValueType(field_type);

1708   // call will_link again to determine if the field is valid.
1709   const bool needs_patching = !holder->is_loaded() ||
1710                               !field->will_link(method(), code) ||
1711                               PatchALot;
1712 
1713   ValueStack* state_before = nullptr;
1714   if (!holder->is_initialized() || needs_patching) {
1715     // save state before instruction for debug info when
1716     // deoptimization happens during patching
1717     state_before = copy_state_before();
1718   }
1719 
1720   Value obj = nullptr;
1721   if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
1722     if (state_before != nullptr) {
1723       // build a patching constant
1724       obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
1725     } else {
1726       obj = new Constant(new InstanceConstant(holder->java_mirror()));
1727     }
1728   }
1729 
1730   if (code == Bytecodes::_putfield) {
1731     scope()->set_wrote_fields();
1732     if (field->is_volatile()) {
1733       scope()->set_wrote_volatile();
1734     }
1735     if (field->is_final()) {
1736       scope()->set_wrote_final();
1737     }
1738     if (field->is_stable()) {
1739       scope()->set_wrote_stable();
1740     }
1741   }
1742 
1743   const int offset = !needs_patching ? field->offset_in_bytes() : -1;
1744   switch (code) {
1745     case Bytecodes::_getstatic: {
1746       // check for compile-time constants, i.e., initialized static final fields
1747       Value constant = nullptr;
1748       if (field->is_static_constant() && !PatchALot) {
1749         ciConstant field_value = field->constant_value();
1750         assert(!field->is_stable() || !field_value.is_null_or_zero(),
1751                "stable static w/ default value shouldn't be a constant");
1752         constant = make_constant(field_value, field);
1753       }
1754       if (constant != nullptr) {
1755         push(type, append(constant));
1756       } else {
1757         if (state_before == nullptr) {
1758           state_before = copy_state_for_exception();
1759         }
1760         push(type, append(new LoadField(append(obj), offset, field, true,
1761                                         state_before, needs_patching)));

1762       }
1763       break;
1764     }
1765     case Bytecodes::_putstatic: {
1766       Value val = pop(type);
1767       if (state_before == nullptr) {
1768         state_before = copy_state_for_exception();
1769       }
1770       if (field->type()->basic_type() == T_BOOLEAN) {
1771         Value mask = append(new Constant(new IntConstant(1)));
1772         val = append(new LogicOp(Bytecodes::_iand, val, mask));
1773       }
1774       append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
1775       break;
1776     }
1777     case Bytecodes::_getfield: {
1778       // Check for compile-time constants, i.e., trusted final non-static fields.
1779       Value constant = nullptr;
1780       obj = apop();
1781       ObjectType* obj_type = obj->type()->as_ObjectType();
1782       if (field->is_constant() && obj_type->is_constant() && !PatchALot) {
1783         ciObject* const_oop = obj_type->constant_value();
1784         if (!const_oop->is_null_object() && const_oop->is_loaded()) {
1785           ciConstant field_value = field->constant_value_of(const_oop);
1786           if (field_value.is_valid()) {
1787             constant = make_constant(field_value, field);
1788             // For CallSite objects add a dependency for invalidation of the optimization.
1789             if (field->is_call_site_target()) {
1790               ciCallSite* call_site = const_oop->as_call_site();
1791               if (!call_site->is_fully_initialized_constant_call_site()) {
1792                 ciMethodHandle* target = field_value.as_object()->as_method_handle();
1793                 dependency_recorder()->assert_call_site_target_value(call_site, target);
1794               }
1795             }
1796           }
1797         }
1798       }
1799       if (constant != nullptr) {
1800         push(type, append(constant));
1801       } else {
1802         if (state_before == nullptr) {
1803           state_before = copy_state_for_exception();
1804         }
1805         LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
1806         Value replacement = !needs_patching ? _memory->load(load) : load;
1807         if (replacement != load) {
1808           assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
1809           // Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing
1810           // conversion. Emit an explicit conversion here to get the correct field value after the write.
1811           BasicType bt = field->type()->basic_type();
1812           switch (bt) {
1813           case T_BOOLEAN:
1814           case T_BYTE:
1815             replacement = append(new Convert(Bytecodes::_i2b, replacement, as_ValueType(bt)));
1816             break;
1817           case T_CHAR:
1818             replacement = append(new Convert(Bytecodes::_i2c, replacement, as_ValueType(bt)));
1819             break;
1820           case T_SHORT:
1821             replacement = append(new Convert(Bytecodes::_i2s, replacement, as_ValueType(bt)));
1822             break;
1823           default:
1824             break;
1825           }
1826           push(type, replacement);
1827         } else {
1828           push(type, append(load));
1829         }
1830       }
1831       break;
1832     }
1833     case Bytecodes::_putfield: {
1834       Value val = pop(type);
1835       obj = apop();
1836       if (state_before == nullptr) {
1837         state_before = copy_state_for_exception();
1838       }
1839       if (field->type()->basic_type() == T_BOOLEAN) {
1840         Value mask = append(new Constant(new IntConstant(1)));
1841         val = append(new LogicOp(Bytecodes::_iand, val, mask));
1842       }
1843       StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching);
1844       if (!needs_patching) store = _memory->store(store);
1845       if (store != nullptr) {
1846         append(store);
1847       }
1848       break;
1849     }
1850     default:
1851       ShouldNotReachHere();
1852       break;
1853   }
1854 }
1855 
1856 
1857 Dependencies* GraphBuilder::dependency_recorder() const {
1858   return compilation()->dependency_recorder();
1859 }
1860 
1861 // How many arguments do we want to profile?
1862 Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) {
1863   int n = 0;
1864   bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
1865   start = has_receiver ? 1 : 0;
1866   if (profile_arguments()) {
1867     ciProfileData* data = method()->method_data()->bci_to_data(bci());
1868     if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
1869       n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
1870     }
1871   }
1872   // If we are inlining then we need to collect arguments to profile parameters for the target
1873   if (profile_parameters() && target != nullptr) {
1874     if (target->method_data() != nullptr && target->method_data()->parameters_type_data() != nullptr) {
1875       // The receiver is profiled on method entry so it's included in
1876       // the number of parameters but here we're only interested in

1952       break;
1953     case Bytecodes::_invokehandle:
1954       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
1955       break;
1956     default:
1957       break;
1958     }
1959   } else {
1960     if (bc_raw == Bytecodes::_invokehandle) {
1961       assert(!will_link, "should come here only for unlinked call");
1962       code = Bytecodes::_invokespecial;
1963     }
1964   }
1965 
1966   if (code == Bytecodes::_invokespecial) {
1967     // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
1968     ciKlass* receiver_constraint = nullptr;
1969 
1970     if (bc_raw == Bytecodes::_invokeinterface) {
1971       receiver_constraint = holder;
1972     } else if (bc_raw == Bytecodes::_invokespecial && !target->is_object_initializer() && calling_klass->is_interface()) {
1973       receiver_constraint = calling_klass;
1974     }
1975 
1976     if (receiver_constraint != nullptr) {
1977       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
1978       Value receiver = state()->stack_at(index);
1979       CheckCast* c = new CheckCast(receiver_constraint, receiver, copy_state_before());
1980       // go to uncommon_trap when checkcast fails
1981       c->set_invokespecial_receiver_check();
1982       state()->stack_at_put(index, append_split(c));
1983     }
1984   }
1985 
1986   // Push appendix argument (MethodType, CallSite, etc.), if there is one.
1987   bool patch_for_appendix = false;
1988   int patching_appendix_arg = 0;
1989   if (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot)) {
1990     Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
1991     apush(arg);
1992     patch_for_appendix = true;

2142       }
2143     } else {
2144       print_inlining(target, "no static binding", /*success*/ false);
2145     }
2146   } else {
2147     print_inlining(target, "not inlineable", /*success*/ false);
2148   }
2149 
2150   // If we attempted an inline which did not succeed because of a
2151   // bailout during construction of the callee graph, the entire
2152   // compilation has to be aborted. This is fairly rare and currently
2153   // seems to only occur for jasm-generated classes which contain
2154   // jsr/ret pairs which are not associated with finally clauses and
2155   // do not have exception handlers in the containing method, and are
2156   // therefore not caught early enough to abort the inlining without
2157   // corrupting the graph. (We currently bail out with a non-empty
2158   // stack at a ret in these situations.)
2159   CHECK_BAILOUT();
2160 
2161   // inlining not successful => standard invoke
2162   ValueType* result_type = as_ValueType(declared_signature->return_type());
2163   ValueStack* state_before = copy_state_exhandling();
2164 
2165   // The bytecode (code) might change in this method, so we check this very late.
2166   const bool has_receiver =
2167     code == Bytecodes::_invokespecial   ||
2168     code == Bytecodes::_invokevirtual   ||
2169     code == Bytecodes::_invokeinterface;
2170   Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
2171   Value recv = has_receiver ? apop() : nullptr;
2172 
2173   // A null check is required here (when there is a receiver) for any of the following cases:
2174   // - invokespecial: always needs a null check.
2175   // - invokevirtual, when the target is final and loaded. Calls to final targets will be optimized
2176   //   and require null checking. If the target is loaded, a null check is emitted here.
2177   //   If the target isn't loaded, the null check must happen after the call resolution. We achieve that
2178   //   by using the target method's unverified entry point (see CompiledIC::compute_monomorphic_entry).
2179   //   (The JVM specification requires that a LinkageError must be thrown before an NPE. An unloaded target
2180   //   may potentially fail to resolve, and so can't have the null check before the resolution.)
2181   // - A call that will be profiled. (But we can't add a null check when the target is unloaded, for the
2182   //   same reason as above, so calls with a receiver to unloaded targets can't be profiled.)
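       //   For example, an invokevirtual of a final method on a receiver of a loaded class is
       //   devirtualized to a direct call, so the required null check is emitted explicitly here.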

2191       null_check(recv);
2192     }
2193 
2194     if (is_profiling()) {
2195       // Note that we'd collect profile data in this method if we wanted it.
2196       compilation()->set_would_profile(true);
2197 
2198       if (profile_calls()) {
2199         assert(cha_monomorphic_target == nullptr || exact_target == nullptr, "both cannot be set");
2200         ciKlass* target_klass = nullptr;
2201         if (cha_monomorphic_target != nullptr) {
2202           target_klass = cha_monomorphic_target->holder();
2203         } else if (exact_target != nullptr) {
2204           target_klass = exact_target->holder();
2205         }
2206         profile_call(target, recv, target_klass, collect_args_for_profiling(args, nullptr, false), false);
2207       }
2208     }
2209   }
2210 
2211   Invoke* result = new Invoke(code, result_type, recv, args, target, state_before);
2212   // push result
2213   append_split(result);
2214 
2215   if (result_type != voidType) {
2216     push(result_type, result);
2217   }
2218   if (profile_return() && result_type->is_object_kind()) {

2219     profile_return_type(result, target);
2220   }
2221 }
2222 
2223 
2224 void GraphBuilder::new_instance(int klass_index) {
2225   ValueStack* state_before = copy_state_exhandling();
2226   ciKlass* klass = stream()->get_klass();
2227   assert(klass->is_instance_klass(), "must be an instance klass");
2228   NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass());
2229   _memory->new_instance(new_instance);
2230   apush(append_split(new_instance));
2231 }
2232 
2233 
2234 void GraphBuilder::new_type_array() {
2235   ValueStack* state_before = copy_state_exhandling();
2236   apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true)));
2237 }
2238 
2239 
2240 void GraphBuilder::new_object_array() {
2241   ciKlass* klass = stream()->get_klass();
2242   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2243   NewArray* n = new NewObjectArray(klass, ipop(), state_before);
2244   apush(append_split(n));
2245 }
2246 
2247 
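     // Returns whether a type check against k can be lowered to a direct klass pointer
     // comparison, e.g. when k is a loaded final class that cannot have subclasses.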
2248 bool GraphBuilder::direct_compare(ciKlass* k) {
2249   if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
2250     ciInstanceKlass* ik = k->as_instance_klass();
2251     if (ik->is_final()) {
2252       return true;
2253     } else {

2286   ciKlass* klass = stream()->get_klass();
2287   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2288   InstanceOf* i = new InstanceOf(klass, apop(), state_before);
2289   ipush(append_split(i));
2290   i->set_direct_compare(direct_compare(klass));
2291 
2292   if (is_profiling()) {
2293     // Note that we'd collect profile data in this method if we wanted it.
2294     compilation()->set_would_profile(true);
2295 
2296     if (profile_checkcasts()) {
2297       i->set_profiled_method(method());
2298       i->set_profiled_bci(bci());
2299       i->set_should_profile(true);
2300     }
2301   }
2302 }
2303 
2304 
2305 void GraphBuilder::monitorenter(Value x, int bci) {
2306   // save state before locking in case of deoptimization after a NullPointerException
2307   ValueStack* state_before = copy_state_for_exception_with_bci(bci);
2308   append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci);
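       // The lock acts as a memory barrier: forget all memorized field and array loads so that
       // nothing is value-numbered across the monitorenter.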
2309   kill_all();
2310 }
2311 
2312 
2313 void GraphBuilder::monitorexit(Value x, int bci) {
2314   append_with_bci(new MonitorExit(x, state()->unlock()), bci);
2315   kill_all();
2316 }
2317 
2318 
2319 void GraphBuilder::new_multi_array(int dimensions) {
2320   ciKlass* klass = stream()->get_klass();
2321   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2322 
2323   Values* dims = new Values(dimensions, dimensions, nullptr);
2324   // fill in all dimensions
2325   int i = dimensions;
2326   while (i-- > 0) dims->at_put(i, ipop());
2327   // create array
2328   NewArray* n = new NewMultiArray(klass, dims, state_before);

2413 
2414 Instruction* GraphBuilder::append_split(StateSplit* instr) {
2415   return append_with_bci(instr, bci());
2416 }
2417 
2418 
2419 void GraphBuilder::null_check(Value value) {
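       // The explicit check can be omitted when the value is provably non-null: a fresh
       // allocation, or a loaded constant that is not the null object.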
2420   if (value->as_NewArray() != nullptr || value->as_NewInstance() != nullptr) {
2421     return;
2422   } else {
2423     Constant* con = value->as_Constant();
2424     if (con) {
2425       ObjectType* c = con->type()->as_ObjectType();
2426       if (c && c->is_loaded()) {
2427         ObjectConstant* oc = c->as_ObjectConstant();
2428         if (!oc || !oc->value()->is_null_object()) {
2429           return;
2430         }
2431       }
2432     }

2433   }
2434   append(new NullCheck(value, copy_state_for_exception()));
2435 }
2436 
2437 
2438 
2439 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
2440   if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != nullptr)) {
2441     assert(instruction->exception_state() == nullptr
2442            || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
2443            || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()),
2444            "exception_state should be of exception kind");
2445     return new XHandlers();
2446   }
2447 
2448   XHandlers*  exception_handlers = new XHandlers();
2449   ScopeData*  cur_scope_data = scope_data();
2450   ValueStack* cur_state = instruction->state_before();
2451   ValueStack* prev_state = nullptr;
2452   int scope_count = 0;
2453 
2454   assert(cur_state != nullptr, "state_before must be set");
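       // Walk from the innermost scope outward: each inlined scope contributes its own handler
       // table, and every handler covering the current bci is joined with the exception edge.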
2455   do {
2456     int cur_bci = cur_state->bci();
2457     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
2458     assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci");
2459 
2460     // join with all potential exception handlers
2461     XHandlers* list = cur_scope_data->xhandlers();
2462     const int n = list->length();
2463     for (int i = 0; i < n; i++) {
2464       XHandler* h = list->handler_at(i);
2465       if (h->covers(cur_bci)) {
2466         // h is a potential exception handler => join it
2467         compilation()->set_has_exception_handlers(true);
2468 
2469         BlockBegin* entry = h->entry_block();
2470         if (entry == block()) {
2471           // It's acceptable for an exception handler to cover itself
2472           // but we don't handle that in the parser currently.  It's
2473         // very rare, so we bail out instead of trying to handle it.
2474           BAILOUT_("exception handler covers itself", exception_handlers);
2475         }
2476         assert(entry->bci() == h->handler_bci(), "must match");
2477         assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
2478 

3252     state->store_local(idx, new Local(type, vt, idx, false));
3253     idx += type->size();
3254   }
3255 
3256   // lock synchronized method
3257   if (method()->is_synchronized()) {
3258     state->lock(nullptr);
3259   }
3260 
3261   return state;
3262 }
3263 
3264 
3265 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
3266   : _scope_data(nullptr)
3267   , _compilation(compilation)
3268   , _memory(new MemoryBuffer())
3269   , _inline_bailout_msg(nullptr)
3270   , _instruction_count(0)
3271   , _osr_entry(nullptr)
3272 {
3273   int osr_bci = compilation->osr_bci();
3274 
3275   // determine entry points and bci2block mapping
3276   BlockListBuilder blm(compilation, scope, osr_bci);
3277   CHECK_BAILOUT();
3278 
3279   BlockList* bci2block = blm.bci2block();
3280   BlockBegin* start_block = bci2block->at(0);
3281 
3282   push_root_scope(scope, bci2block, start_block);
3283 
3284   // setup state for std entry
3285   _initial_state = state_at_entry();
3286   start_block->merge(_initial_state, compilation->has_irreducible_loops());
3287 
3288   // End nulls still exist here
3289 
3290   // complete graph
3291   _vmap        = new ValueMap();

4004   // Temporarily set up bytecode stream so we can append instructions
4005   // (only using the bci of this stream)
4006   scope_data()->set_stream(scope_data()->parent()->stream());
4007 
4008   // Pass parameters into callee state: add assignments
4009   // note: this will also ensure that all arguments are computed before being passed
4010   ValueStack* callee_state = state();
4011   ValueStack* caller_state = state()->caller_state();
4012   for (int i = args_base; i < caller_state->stack_size(); ) {
4013     const int arg_no = i - args_base;
4014     Value arg = caller_state->stack_at_inc(i);
4015     store_local(callee_state, arg, arg_no);
4016   }
4017 
4018   // Remove args from stack.
4019   // Note that we preserve locals state in case we can use it later
4020   // (see use of pop_scope() below)
4021   caller_state->truncate_stack(args_base);
4022   assert(callee_state->stack_size() == 0, "callee stack must be empty");
4023 
4024   Value lock = nullptr;
4025   BlockBegin* sync_handler = nullptr;
4026 
4027   // Inline the locking of the receiver if the callee is synchronized
4028   if (callee->is_synchronized()) {
4029     lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
4030                                : state()->local_at(0);
4031     sync_handler = new BlockBegin(SynchronizationEntryBCI);
4032     inline_sync_entry(lock, sync_handler);
4033   }
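       // sync_handler (set up by inline_sync_entry) releases the lock on the exception path,
       // mirroring the unlock performed on the normal return paths of the inlined callee.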
4034 
4035   if (compilation()->env()->dtrace_method_probes()) {
4036     Values* args = new Values(1);
4037     args->push(append(new Constant(new MethodConstant(method()))));
4038     append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
4039   }
4040 
4041   if (profile_inlined_calls()) {
4042     profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI));
4043   }

*** Updated version of src/hotspot/share/c1/c1_GraphBuilder.cpp ***

   1 /*
   2  * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "c1/c1_Canonicalizer.hpp"
  26 #include "c1/c1_CFGPrinter.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_GraphBuilder.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_InstructionPrinter.hpp"
  31 #include "c1/c1_ValueType.hpp"
  32 #include "ci/ciCallSite.hpp"
  33 #include "ci/ciField.hpp"
  34 #include "ci/ciFlatArrayKlass.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciKlass.hpp"
  37 #include "ci/ciMemberName.hpp"
  38 #include "ci/ciSymbols.hpp"
  39 #include "ci/ciUtilities.inline.hpp"
  40 #include "classfile/javaClasses.hpp"
  41 #include "compiler/compilationPolicy.hpp"
  42 #include "compiler/compileBroker.hpp"
  43 #include "compiler/compilerEvent.hpp"
  44 #include "interpreter/bytecode.hpp"
  45 #include "jfr/jfrEvents.hpp"
  46 #include "memory/resourceArea.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "utilities/checkedCast.hpp"
  50 #include "utilities/macros.hpp"
  51 #if INCLUDE_JFR
  52 #include "jfr/jfr.hpp"
  53 #endif
  54 
  55 class BlockListBuilder {
  56  private:
  57   Compilation* _compilation;
  58   IRScope*     _scope;
  59 
  60   BlockList    _blocks;                // internal list of all blocks
  61   BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder
  62   GrowableArray<BlockList> _bci2block_successors; // Mapping from bcis to their blocks' successors while we don't have a BlockEnd
  63 
  64   // fields used by mark_loops
  65   ResourceBitMap _active;              // for iteration of control flow graph
  66   ResourceBitMap _visited;             // for iteration of control flow graph
  67   GrowableArray<ResourceBitMap> _loop_map; // caches whether a block is contained in a loop

1035       // they are using this local. We don't handle skipping over a
1036       // ret.
1037       for (ScopeData* cur_scope_data = scope_data()->parent();
1038            cur_scope_data != nullptr && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
1039            cur_scope_data = cur_scope_data->parent()) {
1040         if (cur_scope_data->jsr_return_address_local() == index) {
1041           BAILOUT("subroutine overwrites return address from previous subroutine");
1042         }
1043       }
1044     } else if (index == scope_data()->jsr_return_address_local()) {
1045       scope_data()->set_jsr_return_address_local(-1);
1046     }
1047   }
1048 
1049   state->store_local(index, x);
1050 }
1051 
1052 
1053 void GraphBuilder::load_indexed(BasicType type) {
1054   // In case of in-block code motion during range check elimination
1055   ValueStack* state_before = nullptr;
1056   int array_idx = state()->stack_size() - 2;
1057   if (type == T_OBJECT && state()->stack_at(array_idx)->maybe_flat_array()) {
1058     // Save the entire state and re-execute on deopt when accessing flat arrays
1059     state_before = copy_state_before();
1060     state_before->set_should_reexecute(true);
1061   } else {
1062     state_before = copy_state_indexed_access();
1063   }
1064   compilation()->set_has_access_indexed(true);
1065   Value index = ipop();
1066   Value array = apop();
1067   Value length = nullptr;
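       // Eagerly append an explicit ArrayLength when it is forced (CSEArrayLength) or the length
       // is trivially known (constant array, NewArray with a constant length), making the length
       // available to later passes such as range check elimination.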
1068   if (CSEArrayLength ||
1069       (array->as_Constant() != nullptr) ||
1070       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
1071       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
1072       (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
1073     length = append(new ArrayLength(array, state_before));
1074   }
1075 
1076   bool need_membar = false;
1077   LoadIndexed* load_indexed = nullptr;
1078   Instruction* result = nullptr;
1079   if (array->is_loaded_flat_array()) {
1080     ciType* array_type = array->declared_type();
1081     ciFlatArrayKlass* array_klass = array_type->as_flat_array_klass();
1082     ciInlineKlass* elem_klass = array_klass->element_klass()->as_inline_klass();
1083 
1084     bool can_delay_access = false;
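         // Peek at the bytecode following this load: if it is an immediate getfield of the
         // element, the flat array access can be fused with the field access and the element
         // does not need to be buffered in a fresh instance.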
1085     ciBytecodeStream s(method());
1086     s.force_bci(bci());
1087     s.next();
1088     if (s.cur_bc() == Bytecodes::_getfield) {
1089       bool is_null_free = array_klass->is_elem_null_free();
1090       bool will_link;
1091       ciField* next_field = s.get_field(will_link);
1092       bool next_needs_patching = !next_field->holder()->is_initialized() ||
1093                                  !next_field->will_link(method(), Bytecodes::_getfield) ||
1094                                  PatchALot;
1095       bool needs_atomic_access = array_klass->is_elem_atomic();
1096       can_delay_access = is_null_free && C1UseDelayedFlattenedFieldReads &&
1097                          !next_needs_patching && !needs_atomic_access;
1098     }
1099     if (can_delay_access) {
1100       // Potentially optimizable array access; store information for a delayed decision
1101       LoadIndexed* li = new LoadIndexed(array, index, length, type, state_before);
1102       DelayedLoadIndexed* dli = new DelayedLoadIndexed(li, state_before);
1103       li->set_delayed(dli);
1104       set_pending_load_indexed(dli);
1105       return; // Nothing else to do for now
1106     } else {
1107       NewInstance* buffer = new NewInstance(elem_klass, state_before, false, true);
1108       buffer->set_null_free(true);
1109       _memory->new_instance(buffer);
1110       result = append_split(buffer);
1111       load_indexed = new LoadIndexed(array, index, length, type, state_before);
1112       load_indexed->set_buffer(buffer);
1113       // The LoadIndexed node will initialize this instance by copying from
1114       // the flat array element.  Ensure these stores are visible before any
1115       // subsequent store that publishes this reference.
1116       need_membar = true;
1117     }
1118   } else {
1119     load_indexed = new LoadIndexed(array, index, length, type, state_before);
1120     if (profile_array_accesses() && is_reference_type(type)) {
1121       compilation()->set_would_profile(true);
1122       load_indexed->set_should_profile(true);
1123       load_indexed->set_profiled_method(method());
1124       load_indexed->set_profiled_bci(bci());
1125     }
1126   }
1127   result = append(load_indexed);
1128   if (need_membar) {
1129     append(new MemBar(lir_membar_storestore));
1130   }
1131   assert(!load_indexed->should_profile() || load_indexed == result, "should not be optimized out");
1132   push(as_ValueType(type), result);
1133 }
1134 
1135 
1136 void GraphBuilder::store_indexed(BasicType type) {
1137   // In case of in-block code motion during range check elimination
1138   ValueStack* state_before = nullptr;
1139   int array_idx = state()->stack_size() - 3;
1140   if (type == T_OBJECT && state()->stack_at(array_idx)->maybe_flat_array()) {
1141     // Save the entire state and re-execute on deopt when accessing flat arrays
1142     state_before = copy_state_before();
1143     state_before->set_should_reexecute(true);
1144   } else {
1145     state_before = copy_state_indexed_access();
1146   }
1147   compilation()->set_has_access_indexed(true);
1148   Value value = pop(as_ValueType(type));
1149   Value index = ipop();
1150   Value array = apop();
1151   Value length = nullptr;
1152   if (CSEArrayLength ||
1153       (array->as_Constant() != nullptr) ||
1154       (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
1155       (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
1156       (array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
1157     length = append(new ArrayLength(array, state_before));
1158   }
1159   ciType* array_type = array->declared_type();
1160   bool check_boolean = false;
1161   if (array_type != nullptr) {
1162     if (array_type->is_loaded() &&
1163       array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) {
1164       assert(type == T_BYTE, "boolean store uses bastore");
1165       Value mask = append(new Constant(new IntConstant(1)));
1166       value = append(new LogicOp(Bytecodes::_iand, value, mask));
1167     }
1168   } else if (type == T_BYTE) {
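         // The static array type is unknown: the array may still turn out to be a boolean[]
         // (bastore covers both), so ask the StoreIndexed to apply the 0/1 mask at runtime.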
1169     check_boolean = true;
1170   }
1171 
1172   StoreIndexed* store_indexed = new StoreIndexed(array, index, length, type, value, state_before, check_boolean);
1173   if (profile_array_accesses() && is_reference_type(type) && !array->is_loaded_flat_array()) {
1174     compilation()->set_would_profile(true);
1175     store_indexed->set_should_profile(true);
1176     store_indexed->set_profiled_method(method());
1177     store_indexed->set_profiled_bci(bci());
1178   }
1179   Instruction* result = append(store_indexed);
1180   assert(!store_indexed->should_profile() || store_indexed == result, "should not be optimized out");
1181   _memory->store_value(value);
1182 }
1183 

1184 void GraphBuilder::stack_op(Bytecodes::Code code) {
1185   switch (code) {
1186     case Bytecodes::_pop:
1187       { state()->raw_pop();
1188       }
1189       break;
1190     case Bytecodes::_pop2:
1191       { state()->raw_pop();
1192         state()->raw_pop();
1193       }
1194       break;
1195     case Bytecodes::_dup:
1196       { Value w = state()->raw_pop();
1197         state()->raw_push(w);
1198         state()->raw_push(w);
1199       }
1200       break;
1201     case Bytecodes::_dup_x1:
1202       { Value w1 = state()->raw_pop();
1203         Value w2 = state()->raw_pop();

1348 
1349 
1350 void GraphBuilder::_goto(int from_bci, int to_bci) {
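       // A branch to an earlier bci (to_bci <= from_bci) is a loop backedge, so the Goto is
       // created as a safepoint to make the loop poll for safepoints.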
1351   Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
1352   if (is_profiling()) {
1353     compilation()->set_would_profile(true);
1354     x->set_profiled_bci(bci());
1355     if (profile_branches()) {
1356       x->set_profiled_method(method());
1357       x->set_should_profile(true);
1358     }
1359   }
1360   append(x);
1361 }
1362 
1363 
1364 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
1365   BlockBegin* tsux = block_at(stream()->get_dest());
1366   BlockBegin* fsux = block_at(stream()->next_bci());
1367   bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
1368 
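       // Valhalla: acmp on operands that may be value objects cannot be a plain pointer
       // comparison; decide here whether a substitutability check is required.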
1369   bool subst_check = false;
1370   if (Arguments::is_valhalla_enabled() && (stream()->cur_bc() == Bytecodes::_if_acmpeq || stream()->cur_bc() == Bytecodes::_if_acmpne)) {
1371     ValueType* left_vt = x->type();
1372     ValueType* right_vt = y->type();
1373     if (left_vt->is_object()) {
1374       assert(right_vt->is_object(), "must be");
1375       ciKlass* left_klass = x->as_loaded_klass_or_null();
1376       ciKlass* right_klass = y->as_loaded_klass_or_null();
1377 
1378       if (left_klass == nullptr || right_klass == nullptr) {
1379         // The klass is still unloaded, or came from a Phi node. Go to the slow case.
1380         subst_check = true;
1381       } else if (left_klass->can_be_inline_klass() || right_klass->can_be_inline_klass()) {
1382         // Either operand may be a value object, but we're not sure. Go to the slow case.
1383         subst_check = true;
1384       } else {
1385         // No need to do a substitutability check.
1386       }
1387     }
1388   }
1389   if ((stream()->cur_bc() == Bytecodes::_if_acmpeq || stream()->cur_bc() == Bytecodes::_if_acmpne) &&
1390       is_profiling() && profile_branches()) {
1391     compilation()->set_would_profile(true);
1392     append(new ProfileACmpTypes(method(), bci(), x, y));
1393   }
1394 
1395   // In case of loop invariant code motion or predicate insertion
1396   // before the body of a loop, the state is needed
1397   Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic() || subst_check) ? state_before : nullptr, is_bb, subst_check));
1398 
1399   assert(i->as_Goto() == nullptr ||
1400          (i->as_Goto()->sux_at(0) == tsux  && i->as_Goto()->is_safepoint() == (tsux->bci() < stream()->cur_bci())) ||
1401          (i->as_Goto()->sux_at(0) == fsux  && i->as_Goto()->is_safepoint() == (fsux->bci() < stream()->cur_bci())),
1402          "safepoint state of Goto returned by canonicalizer incorrect");
1403 
1404   if (is_profiling()) {
1405     If* if_node = i->as_If();
1406     if (if_node != nullptr) {
1407       // Note that we'd collect profile data in this method if we wanted it.
1408       compilation()->set_would_profile(true);
1409       // At level 2 we need the proper bci to count backedges
1410       if_node->set_profiled_bci(bci());
1411       if (profile_branches()) {
1412         // Successors can be rotated by the canonicalizer, check for this case.
1413         if_node->set_profiled_method(method());
1414         if_node->set_should_profile(true);
1415         if (if_node->tsux() == fsux) {
1416           if_node->set_swapped(true);
1417         }

1632   }
1633 
1634   if (needs_check) {
1635     // Perform the registration of finalizable objects.
1636     ValueStack* state_before = copy_state_for_exception();
1637     load_local(objectType, 0);
1638     append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
1639                                state()->pop_arguments(1),
1640                                true, state_before, true));
1641   }
1642 }
1643 
1644 
1645 void GraphBuilder::method_return(Value x, bool ignore_return) {
1646   if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1647     call_register_finalizer();
1648   }
1649 
1650   // The conditions for a memory barrier are described in Parse::do_exits().
1651   bool need_mem_bar = false;
1652   if (method()->is_object_constructor() &&
1653        (scope()->wrote_non_strict_final() || scope()->wrote_stable() ||
1654          (AlwaysSafeConstructors && scope()->wrote_fields()) ||
1655          (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) {
1656     need_mem_bar = true;
1657   }
1658 
1659   BasicType bt = method()->return_type()->basic_type();
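       // Sub-int return values may be carried as a wider int here; truncate to the declared width.
       // The shl/shr pair sign-extends the low bits, e.g. for T_BYTE ((0x1FF << 24) >> 24) == -1 == (byte)0x1FF.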
1660   switch (bt) {
1661     case T_BYTE:
1662     {
1663       Value shift = append(new Constant(new IntConstant(24)));
1664       x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
1665       x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
1666       break;
1667     }
1668     case T_SHORT:
1669     {
1670       Value shift = append(new Constant(new IntConstant(16)));
1671       x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
1672       x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
1673       break;

1783   // Attach dimension info to stable arrays.
1784   if (FoldStableValues &&
1785       field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) {
1786     ciArray* array = field_value.as_object()->as_array();
1787     jint dimension = field->type()->as_array_klass()->dimension();
1788     value = new StableArrayConstant(array, dimension);
1789   }
1790 
1791   switch (field_type) {
1792     case T_ARRAY:
1793     case T_OBJECT:
1794       if (field_value.as_object()->should_be_constant()) {
1795         return new Constant(value);
1796       }
1797       return nullptr; // Not a constant.
1798     default:
1799       return new Constant(value);
1800   }
1801 }
1802 
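     // Copy an inline type payload field by field from src to dest, recursing into nested flat
     // fields. Null markers of nullable flat fields are not real fields, so they are copied with
     // raw Unsafe accesses at their payload-relative offsets.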
1803 void GraphBuilder::copy_inline_content(ciInlineKlass* vk, Value src, int src_off, Value dest, int dest_off, ValueStack* state_before, ciField* enclosing_field) {
1804   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1805     ciField* field = vk->declared_nonstatic_field_at(i);
1806     int offset = field->offset_in_bytes() - vk->payload_offset();
1807     if (field->is_flat()) {
1808       copy_inline_content(field->type()->as_inline_klass(), src, src_off + offset, dest, dest_off + offset, state_before, enclosing_field);
1809       if (!field->is_null_free()) {
1810         // Nullable: copy the null marker using Unsafe because null markers are not real fields
1811         int null_marker_offset = field->null_marker_offset() - vk->payload_offset();
1812         Value offset = append(new Constant(new LongConstant(src_off + null_marker_offset)));
1813         Value nm = append(new UnsafeGet(T_BOOLEAN, src, offset, false));
1814         offset = append(new Constant(new LongConstant(dest_off + null_marker_offset)));
1815         append(new UnsafePut(T_BOOLEAN, dest, offset, nm, false));
1816       }
1817     } else {
1818       Value value = append(new LoadField(src, src_off + offset, field, false, state_before, false));
1819       StoreField* store = new StoreField(dest, dest_off + offset, field, value, false, state_before, false);
1820       store->set_enclosing_field(enclosing_field);
1821       append(store);
1822     }
1823   }
1824 }
1825 
1826 void GraphBuilder::access_field(Bytecodes::Code code) {
1827   bool will_link;
1828   ciField* field = stream()->get_field(will_link);
1829   ciInstanceKlass* holder = field->holder();
1830   BasicType field_basic_type = field->type()->basic_type();
1831   ValueType* type = as_ValueType(field_basic_type);
1832 
1833   // call will_link again to determine if the field is valid.
1834   const bool needs_patching = !holder->is_loaded() ||
1835                               !field->will_link(method(), code) ||
1836                               (!field->is_flat() && PatchALot);
1837 
1838   ValueStack* state_before = nullptr;
1839   if (!holder->is_initialized() || needs_patching) {
1840     // save state before instruction for debug info when
1841     // deoptimization happens during patching
1842     state_before = copy_state_before();
1843   }
1844 
1845   Value obj = nullptr;
1846   if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
1847     if (state_before != nullptr) {
1848       // build a patching constant
1849       obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
1850     } else {
1851       obj = new Constant(new InstanceConstant(holder->java_mirror()));
1852     }
1853   }
1854 
1855   if (code == Bytecodes::_putfield) {
1856     scope()->set_wrote_fields();
1857     if (field->is_volatile()) {
1858       scope()->set_wrote_volatile();
1859     }
1860     if (field->is_final() && !field->is_strict()) {
1861       scope()->set_wrote_non_strict_final();
1862     }
1863     if (field->is_stable()) {
1864       scope()->set_wrote_stable();
1865     }
1866   }
1867 
1868   int offset = !needs_patching ? field->offset_in_bytes() : -1;
1869   switch (code) {
1870     case Bytecodes::_getstatic: {
1871       // check for compile-time constants, i.e., initialized static final fields
1872       Value constant = nullptr;
1873       if (field->is_static_constant() && !PatchALot) {
1874         ciConstant field_value = field->constant_value();
1875         assert(!field->is_stable() || !field_value.is_null_or_zero(),
1876                "stable static w/ default value shouldn't be a constant");
1877         constant = make_constant(field_value, field);
1878       }
1879       if (constant != nullptr) {
1880         push(type, append(constant));
1881       } else {
1882         if (state_before == nullptr) {
1883           state_before = copy_state_for_exception();
1884         }
1885         LoadField* load_field = new LoadField(append(obj), offset, field, true,
1886                                         state_before, needs_patching);
1887         push(type, append(load_field));
1888       }
1889       break;
1890     }
1891     case Bytecodes::_putstatic: {
1892       Value val = pop(type);
1893       if (state_before == nullptr) {
1894         state_before = copy_state_for_exception();
1895       }
1896       if (field_basic_type == T_BOOLEAN) {
1897         Value mask = append(new Constant(new IntConstant(1)));
1898         val = append(new LogicOp(Bytecodes::_iand, val, mask));
1899       }
1900       if (field->is_null_free()) {
1901         null_check(val);
1902 
1903         ciType* field_type = field->type();
1904         if (field_type->is_loaded() && field_type->is_inlinetype() && field_type->as_inline_klass()->is_empty() &&
1905             (!method()->is_class_initializer() || field->is_flat())) {
1906           // Storing to a field of an empty, null-free inline type that is already initialized. Ignore.
1907           break;
1908         }
1909       }
1910       append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
1911       break;
1912     }
1913     case Bytecodes::_getfield: {
1914       // Check for compile-time constants, i.e., trusted final non-static fields.
1915       Value constant = nullptr;
1916       if (state_before == nullptr && field->is_flat()) {
1917         // Save the entire state and re-execute on deopt when accessing flat fields
1918         assert(Interpreter::bytecode_should_reexecute(code), "should reexecute");
1919         state_before = copy_state_before();
1920       }
1921       if (!has_pending_field_access() && !has_pending_load_indexed()) {
1922         obj = apop();
1923         ObjectType* obj_type = obj->type()->as_ObjectType();
1924         if (field->is_constant() && !field->is_flat() && obj_type->is_constant() && !PatchALot) {
1925           ciObject* const_oop = obj_type->constant_value();
1926           if (!const_oop->is_null_object() && const_oop->is_loaded()) {
1927             ciConstant field_value = field->constant_value_of(const_oop);
1928             if (field_value.is_valid()) {
1929               constant = make_constant(field_value, field);
1930               // For CallSite objects, add a dependency so the optimization can be invalidated if the target changes.
1931               if (field->is_call_site_target()) {
1932                 ciCallSite* call_site = const_oop->as_call_site();
1933                 if (!call_site->is_fully_initialized_constant_call_site()) {
1934                   ciMethodHandle* target = field_value.as_object()->as_method_handle();
1935                   dependency_recorder()->assert_call_site_target_value(call_site, target);
1936                 }
1937               }
1938             }
1939           }
1940         }
1941       }
1942       if (constant != nullptr) {
1943         push(type, append(constant));
1944       } else {
1945         if (state_before == nullptr) {
1946           state_before = copy_state_for_exception();
1947         }
1948         if (!field->is_flat()) {
1949           if (has_pending_field_access()) {
1950             assert(!needs_patching, "Can't patch delayed field access");
1951             obj = pending_field_access()->obj();
1952             offset += pending_field_access()->offset() - field->holder()->as_inline_klass()->payload_offset();
1953             field = pending_field_access()->holder()->get_field_by_offset(offset, false);
1954             assert(field != nullptr, "field not found");
1955             set_pending_field_access(nullptr);
1956           } else if (has_pending_load_indexed()) {
1957             assert(!needs_patching, "Can't patch delayed field access");
1958             pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
1959             LoadIndexed* li = pending_load_indexed()->load_instr();
1960             li->set_type(type);
1961             push(type, append(li));
1962             set_pending_load_indexed(nullptr);
1963             break;
1964           }
1965           LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
1966           Value replacement = !needs_patching ? _memory->load(load) : load;
1967           if (replacement != load) {
1968             assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
1969             // Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing
1970             // conversion. Emit an explicit conversion here to get the correct field value after the write.
1971             switch (field_basic_type) {
1972             case T_BOOLEAN:
1973             case T_BYTE:
1974               replacement = append(new Convert(Bytecodes::_i2b, replacement, type));
1975               break;
1976             case T_CHAR:
1977               replacement = append(new Convert(Bytecodes::_i2c, replacement, type));
1978               break;
1979             case T_SHORT:
1980               replacement = append(new Convert(Bytecodes::_i2s, replacement, type));
1981               break;
1982             default:
1983               break;
1984             }
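                 // E.g. after 'obj.byteField = 0x101', load elimination would
                 // forward the raw 0x101; the explicit i2b yields 1, which is
                 // what an actual load of the byte field would produce.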
1985             push(type, replacement);
1986           } else {
1987             push(type, append(load));
1988           }
1989         } else {
1990           // Flat field
1991           assert(!needs_patching, "Can't patch flat inline type field access");
1992           ciInlineKlass* inline_klass = field->type()->as_inline_klass();
1993           if (field->is_atomic()) {
1994             assert(!has_pending_field_access(), "Pending field accesses are not supported");
1995             LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
1996             push(type, append(load));
1997           } else {
1998             // Look at the next bytecode to check if we can delay the field access
1999             bool can_delay_access = false;
2000             if (field->is_null_free()) {
2001               ciBytecodeStream s(method());
2002               s.force_bci(bci());
2003               s.next();
2004               if (s.cur_bc() == Bytecodes::_getfield && !needs_patching) {
2005                 ciField* next_field = s.get_field(will_link);
2006                 bool next_needs_patching = !next_field->holder()->is_loaded() ||
2007                                           !next_field->will_link(method(), Bytecodes::_getfield) ||
2008                                           PatchALot;
2009                 // We can't update the offset for atomic accesses
2010                 bool next_needs_atomic_access = next_field->is_flat() && next_field->is_atomic();
2011                 can_delay_access = C1UseDelayedFlattenedFieldReads && !next_needs_patching && !next_needs_atomic_access && next_field->is_null_free();
2012               }
2013             }
2014 
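                 // Delaying folds a chain of flat-field getfields (e.g. 'a.b.c'
                 // where b and c are null-free flat) into a single access: only
                 // the offsets are accumulated and one load is emitted at the
                 // end of the chain, with no intermediate buffer allocations.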
2015             if (can_delay_access) {
2016               if (has_pending_load_indexed()) {
2017                 pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
2018               } else if (has_pending_field_access()) {
2019                 pending_field_access()->inc_offset(offset - field->holder()->as_inline_klass()->payload_offset());
2020               } else {
2021                 null_check(obj);
2022                 DelayedFieldAccess* dfa = new DelayedFieldAccess(obj, field->holder(), field->offset_in_bytes(), state_before);
2023                 set_pending_field_access(dfa);
2024               }
2025             } else {
2026               if (!field->is_strict()) {
2027                 scope()->set_wrote_non_strict_final();
2028               }
2029               scope()->set_wrote_fields();
2030               if (has_pending_load_indexed()) {
2031                 assert(field->is_null_free(), "nullable fields do not support delayed accesses yet");
2032                 assert(!needs_patching, "Can't patch delayed field access");
2033                 pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
2034                 NewInstance* buffer = new NewInstance(inline_klass, pending_load_indexed()->state_before(), false, true);
2035                 buffer->set_null_free(true);
2036                 _memory->new_instance(buffer);
2037                 pending_load_indexed()->load_instr()->set_buffer(buffer);
2038                 apush(append_split(buffer));
2039                 append(pending_load_indexed()->load_instr());
2040                 set_pending_load_indexed(nullptr);
2041               } else if (has_pending_field_access()) {
2042                 assert(field->is_null_free(), "nullable fields do not support delayed accesses yet");
2043                 state_before = pending_field_access()->state_before();
2044                 NewInstance* buffer = new NewInstance(inline_klass, state_before, false, true);
2045                 _memory->new_instance(buffer);
2046                 apush(append_split(buffer));
2047                 copy_inline_content(inline_klass, pending_field_access()->obj(),
2048                                     pending_field_access()->offset() + field->offset_in_bytes() - field->holder()->as_inline_klass()->payload_offset(),
2049                                     buffer, inline_klass->payload_offset(), state_before);
2050                 set_pending_field_access(nullptr);
2051               } else {
2052                 if (!field->is_null_free() && !inline_klass->is_initialized()) {
2053                   // Cannot allocate an instance of inline_klass because it may not have
2054                   // been initialized yet; bail out for now.
2055                   bailout("load from an uninitialized nullable non-atomic flat field");
2056                   return;
2057                 }
2058 
2059                 NewInstance* buffer = new NewInstance(inline_klass, state_before, false, true);
2060                 _memory->new_instance(buffer);
2061                 append_split(buffer);
2062 
2063                 if (inline_klass->is_initialized() && inline_klass->is_empty()) {
2064                   // Needs an explicit null check because the code below does not perform any actual load if there are no fields
2065                   null_check(obj);
2066                 }
2067                 copy_inline_content(inline_klass, obj, field->offset_in_bytes(), buffer, inline_klass->payload_offset(), state_before);
2068 
2069                 Instruction* result = buffer;
2070                 if (!field->is_null_free()) {
2071                   Value int_zero = append(new Constant(intZero));
2072                   Value object_null = append(new Constant(objectNull));
2073                   Value nm_offset = append(new Constant(new LongConstant(offset + inline_klass->null_marker_offset_in_payload())));
2074                   Value nm = append(new UnsafeGet(T_BOOLEAN, obj, nm_offset, false));
2075                   result = append(new IfOp(nm, Instruction::neq, int_zero, buffer, object_null, state_before, false));
2076                 }
2077                 apush(result);
2078               }
2079 
2080               // If we allocated a new instance, ensure the stores to copy the
2081               // field contents are visible before any subsequent store that
2082               // publishes this reference.
2083               append(new MemBar(lir_membar_storestore));
2084             }
2085           }
2086         }
2087       }
2088       break;
2089     }
2090     case Bytecodes::_putfield: {
2091       Value val = pop(type);
2092       obj = apop();
2093       if (state_before == nullptr) {
2094         state_before = copy_state_for_exception();
2095       }
2096       if (field_basic_type == T_BOOLEAN) {
2097         Value mask = append(new Constant(new IntConstant(1)));
2098         val = append(new LogicOp(Bytecodes::_iand, val, mask));
2099       }
2100 
2101       ciType* field_type = field->type();
2102       if (field->is_null_free() && field_type->is_loaded() && field_type->is_inlinetype() &&
2103           field_type->as_inline_klass()->is_empty() && (!method()->is_object_constructor() || field->is_flat())) {
2104         // Storing to a field of an empty, null-free inline type that is already initialized. Ignore.
2105         null_check(obj);
2106         null_check(val);
2107       } else if (!field->is_flat()) {
2108         if (field->is_null_free()) {
2109           null_check(val);
2110         }
2111         StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching);
2112         if (!needs_patching) store = _memory->store(store);
2113         if (store != nullptr) {
2114           append(store);
2115         }
2116       } else {
2117         // Flat field
2118         assert(!needs_patching, "Can't patch flat inline type field access");
2119         ciInlineKlass* inline_klass = field_type->as_inline_klass();
2120         if (field->is_atomic()) {
2121           if (field->is_null_free()) {
2122             null_check(val);
2123           }
2124           append(new StoreField(obj, offset, field, val, false, state_before, needs_patching));
2125         } else if (field->is_null_free()) {
2126           assert(!inline_klass->is_empty(), "should have been handled");
2127           copy_inline_content(inline_klass, val, inline_klass->payload_offset(), obj, offset, state_before, field);
2128         } else {
2129           if (!inline_klass->is_initialized()) {
2130             // null_reset_value is not available, bailout for now
2131             bailout("store to an uninitialized nullable non-atomic flat field");
2132             return;
2133           }
2134 
2135           // Store the subfields when the field is a nullable non-atomic field
2136           Value object_null = append(new Constant(objectNull));
2137           Value null_reset_value = append(new Constant(new ObjectConstant(inline_klass->get_null_reset_value().as_object())));
2138           Value src = append(new IfOp(val, Instruction::neq, object_null, val, null_reset_value, state_before, false));
2139           copy_inline_content(inline_klass, src, inline_klass->payload_offset(), obj, offset, state_before);
2140 
2141           // Store the null marker
2142           Value int_one = append(new Constant(new IntConstant(1)));
2143           Value int_zero = append(new Constant(intZero));
2144           Value nm = append(new IfOp(val, Instruction::neq, object_null, int_one, int_zero, state_before, false));
2145           Value nm_offset = append(new Constant(new LongConstant(offset + inline_klass->null_marker_offset_in_payload())));
2146           append(new UnsafePut(T_BOOLEAN, obj, nm_offset, nm, false));
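               // Net effect: 'obj.f = v' writes v's payload with nm = 1, or the
               // null_reset_value payload with nm = 0 when v is null, so a later
               // load can reconstruct the null.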
2147         }
2148       }
2149       break;
2150     }
2151     default:
2152       ShouldNotReachHere();
2153       break;
2154   }
2155 }
2156 
2157 Dependencies* GraphBuilder::dependency_recorder() const {
2158   return compilation()->dependency_recorder();
2159 }
2160 
2161 // How many arguments do we want to profile?
2162 Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) {
2163   int n = 0;
2164   bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
2165   start = has_receiver ? 1 : 0;
2166   if (profile_arguments()) {
2167     ciProfileData* data = method()->method_data()->bci_to_data(bci());
2168     if (data != nullptr && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) {
2169       n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
2170     }
2171   }
2172   // If we are inlining, we need to collect arguments to profile parameters for the target
2173   if (profile_parameters() && target != nullptr) {
2174     if (target->method_data() != nullptr && target->method_data()->parameters_type_data() != nullptr) {
2175       // The receiver is profiled on method entry so it's included in
2176       // the number of parameters but here we're only interested in

2252       break;
2253     case Bytecodes::_invokehandle:
2254       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
2255       break;
2256     default:
2257       break;
2258     }
2259   } else {
2260     if (bc_raw == Bytecodes::_invokehandle) {
2261       assert(!will_link, "should come here only for unlinked call");
2262       code = Bytecodes::_invokespecial;
2263     }
2264   }
2265 
2266   if (code == Bytecodes::_invokespecial) {
2267     // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
2268     ciKlass* receiver_constraint = nullptr;
2269 
2270     if (bc_raw == Bytecodes::_invokeinterface) {
2271       receiver_constraint = holder;
2272     } else if (bc_raw == Bytecodes::_invokespecial && !target->is_object_constructor() && calling_klass->is_interface()) {
2273       receiver_constraint = calling_klass;
2274     }
2275 
2276     if (receiver_constraint != nullptr) {
2277       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
2278       Value receiver = state()->stack_at(index);
2279       CheckCast* c = new CheckCast(receiver_constraint, receiver, copy_state_before());
2280       // go to uncommon_trap when checkcast fails
2281       c->set_invokespecial_receiver_check();
2282       state()->stack_at_put(index, append_split(c));
2283     }
2284   }
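       // On a failed receiver check the CheckCast traps to the interpreter,
       // which re-executes the call and throws the exception required by the
       // JVMS, instead of compiled code raising a ClassCastException.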
2285 
2286   // Push appendix argument (MethodType, CallSite, etc.), if one is present.
2287   bool patch_for_appendix = false;
2288   int patching_appendix_arg = 0;
2289   if (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot)) {
2290     Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
2291     apush(arg);
2292     patch_for_appendix = true;

2442       }
2443     } else {
2444       print_inlining(target, "no static binding", /*success*/ false);
2445     }
2446   } else {
2447     print_inlining(target, "not inlineable", /*success*/ false);
2448   }
2449 
2450   // If we attempted an inline which did not succeed because of a
2451   // bailout during construction of the callee graph, the entire
2452   // compilation has to be aborted. This is fairly rare and currently
2453   // seems to only occur for jasm-generated classes which contain
2454   // jsr/ret pairs which are not associated with finally clauses and
2455   // do not have exception handlers in the containing method, and are
2456   // therefore not caught early enough to abort the inlining without
2457   // corrupting the graph. (We currently bail out with a non-empty
2458   // stack at a ret in these situations.)
2459   CHECK_BAILOUT();
2460 
2461   // inlining not successful => standard invoke
2462   ciType* return_type = declared_signature->return_type();
2463   ValueStack* state_before = copy_state_exhandling();
2464 
2465   // The bytecode (code) might change in this method, so we check this very late.
2466   const bool has_receiver =
2467     code == Bytecodes::_invokespecial   ||
2468     code == Bytecodes::_invokevirtual   ||
2469     code == Bytecodes::_invokeinterface;
2470   Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
2471   Value recv = has_receiver ? apop() : nullptr;
2472 
2473   // A null check is required here (when there is a receiver) for any of the following cases:
2474   // - invokespecial always needs a null check.
2475   // - invokevirtual, when the target is final and loaded. Calls to final targets become optimized
2476   //   and require null checking. If the target is loaded, a null check is emitted here.
2477   //   If the target isn't loaded, the null check must happen after call resolution. We achieve that
2478   //   by using the target method's unverified entry point (see CompiledIC::compute_monomorphic_entry).
2479   //   (The JVM specification requires that a LinkageError must be thrown before an NPE. An unloaded
2480   //   target may potentially fail, so the null check can't precede the resolution.)
2481   // - A call that will be profiled. (But we can't add a null check when the target is unloaded, for the same
2482   //   reason as above, so calls with a receiver to unloaded targets can't be profiled.)

2491       null_check(recv);
2492     }
2493 
2494     if (is_profiling()) {
2495       // Note that we'd collect profile data in this method if we wanted it.
2496       compilation()->set_would_profile(true);
2497 
2498       if (profile_calls()) {
2499         assert(cha_monomorphic_target == nullptr || exact_target == nullptr, "both cannot be set");
2500         ciKlass* target_klass = nullptr;
2501         if (cha_monomorphic_target != nullptr) {
2502           target_klass = cha_monomorphic_target->holder();
2503         } else if (exact_target != nullptr) {
2504           target_klass = exact_target->holder();
2505         }
2506         profile_call(target, recv, target_klass, collect_args_for_profiling(args, nullptr, false), false);
2507       }
2508     }
2509   }
2510 
2511   Invoke* result = new Invoke(code, return_type, recv, args, target, state_before);
2512   // push result
2513   append_split(result);
2514 
2515   if (!return_type->is_void()) {
2516     push(as_ValueType(return_type), result);
2517   }
2518 
2519   if (profile_return() && return_type->is_object()) {
2520     profile_return_type(result, target);
2521   }
2522 }
2523 
2524 
2525 void GraphBuilder::new_instance(int klass_index) {
2526   ValueStack* state_before = copy_state_exhandling();
2527   ciKlass* klass = stream()->get_klass();
2528   assert(klass->is_instance_klass(), "must be an instance klass");
2529   NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass(), false);
2530   _memory->new_instance(new_instance);
2531   apush(append_split(new_instance));
2532 }
2533 
2534 void GraphBuilder::new_type_array() {
2535   ValueStack* state_before = copy_state_exhandling();
2536   apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true)));
2537 }
2538 
2539 
2540 void GraphBuilder::new_object_array() {
2541   ciKlass* klass = stream()->get_klass();
2542   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2543   NewArray* n = new NewObjectArray(klass, ipop(), state_before);
2544   apush(append_split(n));
2545 }
2546 
2547 
2548 bool GraphBuilder::direct_compare(ciKlass* k) {
2549   if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
2550     ciInstanceKlass* ik = k->as_instance_klass();
2551     if (ik->is_final()) {
2552       return true;
2553     } else {

2586   ciKlass* klass = stream()->get_klass();
2587   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2588   InstanceOf* i = new InstanceOf(klass, apop(), state_before);
2589   ipush(append_split(i));
2590   i->set_direct_compare(direct_compare(klass));
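       // A direct compare means the type check is a single pointer comparison
       // of the object's klass word (e.g. against a final class) rather than
       // a subtype walk.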
2591 
2592   if (is_profiling()) {
2593     // Note that we'd collect profile data in this method if we wanted it.
2594     compilation()->set_would_profile(true);
2595 
2596     if (profile_checkcasts()) {
2597       i->set_profiled_method(method());
2598       i->set_profiled_bci(bci());
2599       i->set_should_profile(true);
2600     }
2601   }
2602 }
2603 
2604 
2605 void GraphBuilder::monitorenter(Value x, int bci) {
2606   bool maybe_inlinetype = false;
2607   if (bci == InvocationEntryBci) {
2608     // Called by GraphBuilder::inline_sync_entry.
2609 #ifdef ASSERT
2610     ciType* obj_type = x->declared_type();
2611     assert(obj_type == nullptr || !obj_type->is_inlinetype(), "inline types cannot have synchronized methods");
2612 #endif
2613   } else {
2614     // We are compiling a monitorenter bytecode
2615     if (Arguments::is_valhalla_enabled()) {
2616       ciType* obj_type = x->declared_type();
2617       if (obj_type == nullptr || obj_type->can_be_inline_klass()) {
2618         // If we're (possibly) locking on an inline type, check for markWord::always_locked_pattern
2619         // and throw IMSE. (obj_type is null for Phi nodes, so let's just be conservative).
2620         maybe_inlinetype = true;
2621       }
2622     }
2623   }
2624 
2625   // save state before locking in case of deoptimization after a NullPointerException
2626   ValueStack* state_before = copy_state_for_exception_with_bci(bci);
2627   append_with_bci(new MonitorEnter(x, state()->lock(x), state_before, maybe_inlinetype), bci);
2628   kill_all();
2629 }
2630 
2631 
2632 void GraphBuilder::monitorexit(Value x, int bci) {
2633   append_with_bci(new MonitorExit(x, state()->unlock()), bci);
2634   kill_all();
2635 }
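     // Both monitor operations end with kill_all(): a monitor is a
     // synchronization point, so field and array values cached by the value
     // map for load elimination must not be reused across it.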
2636 
2637 
2638 void GraphBuilder::new_multi_array(int dimensions) {
2639   ciKlass* klass = stream()->get_klass();
2640   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
2641 
2642   Values* dims = new Values(dimensions, dimensions, nullptr);
2643   // fill in all dimensions
2644   int i = dimensions;
2645   while (i-- > 0) dims->at_put(i, ipop());
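       // The innermost dimension is on top of the stack, so filling backwards
       // restores declaration order: for 'new A[2][3]', dims becomes {2, 3}.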
2646   // create array
2647   NewArray* n = new NewMultiArray(klass, dims, state_before);

2732 
2733 Instruction* GraphBuilder::append_split(StateSplit* instr) {
2734   return append_with_bci(instr, bci());
2735 }
2736 
2737 
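     // Elides the null check when the value is provably non-null: fresh
     // allocations, loaded non-null object constants and null-free inline
     // type values never need one.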
2738 void GraphBuilder::null_check(Value value) {
2739   if (value->as_NewArray() != nullptr || value->as_NewInstance() != nullptr) {
2740     return;
2741   } else {
2742     Constant* con = value->as_Constant();
2743     if (con) {
2744       ObjectType* c = con->type()->as_ObjectType();
2745       if (c && c->is_loaded()) {
2746         ObjectConstant* oc = c->as_ObjectConstant();
2747         if (!oc || !oc->value()->is_null_object()) {
2748           return;
2749         }
2750       }
2751     }
2752     if (value->is_null_free()) return;
2753   }
2754   append(new NullCheck(value, copy_state_for_exception()));
2755 }
2756 
2757 
2758 
2759 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
2760   if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != nullptr)) {
2761     assert(instruction->exception_state() == nullptr
2762            || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
2763            || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()),
2764            "exception_state should be of exception kind");
2765     return new XHandlers();
2766   }
2767 
2768   XHandlers*  exception_handlers = new XHandlers();
2769   ScopeData*  cur_scope_data = scope_data();
2770   ValueStack* cur_state = instruction->state_before();
2771   ValueStack* prev_state = nullptr;
2772   int scope_count = 0;
2773 
2774   assert(cur_state != nullptr, "state_before must be set");
2775   do {
2776     int cur_bci = cur_state->bci();
2777     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
2778     assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci()
2779            || has_pending_field_access() || has_pending_load_indexed(), "invalid bci");
2780 
2781 
2782     // join with all potential exception handlers
2783     XHandlers* list = cur_scope_data->xhandlers();
2784     const int n = list->length();
2785     for (int i = 0; i < n; i++) {
2786       XHandler* h = list->handler_at(i);
2787       if (h->covers(cur_bci)) {
2788         // h is a potential exception handler => join it
2789         compilation()->set_has_exception_handlers(true);
2790 
2791         BlockBegin* entry = h->entry_block();
2792         if (entry == block()) {
2793           // It's acceptable for an exception handler to cover itself,
2794           // but we don't handle that in the parser currently. It's
2795           // very rare, so we bail out instead of trying to handle it.
2796           BAILOUT_("exception handler covers itself", exception_handlers);
2797         }
2798         assert(entry->bci() == h->handler_bci(), "must match");
2799         assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
2800 

3574     state->store_local(idx, new Local(type, vt, idx, false));
3575     idx += type->size();
3576   }
3577 
3578   // lock synchronized method
3579   if (method()->is_synchronized()) {
3580     state->lock(nullptr);
3581   }
3582 
3583   return state;
3584 }
3585 
3586 
3587 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
3588   : _scope_data(nullptr)
3589   , _compilation(compilation)
3590   , _memory(new MemoryBuffer())
3591   , _inline_bailout_msg(nullptr)
3592   , _instruction_count(0)
3593   , _osr_entry(nullptr)
3594   , _pending_field_access(nullptr)
3595   , _pending_load_indexed(nullptr)
3596 {
3597   int osr_bci = compilation->osr_bci();
3598 
3599   // determine entry points and bci2block mapping
3600   BlockListBuilder blm(compilation, scope, osr_bci);
3601   CHECK_BAILOUT();
3602 
3603   BlockList* bci2block = blm.bci2block();
3604   BlockBegin* start_block = bci2block->at(0);
3605 
3606   push_root_scope(scope, bci2block, start_block);
3607 
3608   // setup state for std entry
3609   _initial_state = state_at_entry();
3610   start_block->merge(_initial_state, compilation->has_irreducible_loops());
3611 
3612   // End nulls still exist here
3613 
3614   // complete graph
3615   _vmap        = new ValueMap();

4328   // Temporarily set up bytecode stream so we can append instructions
4329   // (only using the bci of this stream)
4330   scope_data()->set_stream(scope_data()->parent()->stream());
4331 
4332   // Pass parameters into callee state: add assignments
4333   // note: this will also ensure that all arguments are computed before being passed
4334   ValueStack* callee_state = state();
4335   ValueStack* caller_state = state()->caller_state();
4336   for (int i = args_base; i < caller_state->stack_size(); ) {
4337     const int arg_no = i - args_base;
4338     Value arg = caller_state->stack_at_inc(i);
4339     store_local(callee_state, arg, arg_no);
4340   }
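       // E.g. for a callee taking (long, int): stack_at_inc advances i by two
       // slots for the long, so it lands in callee locals 0/1 and the int in
       // local 2, mirroring the interpreter's calling convention.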
4341 
4342   // Remove args from stack.
4343   // Note that we preserve locals state in case we can use it later
4344   // (see use of pop_scope() below)
4345   caller_state->truncate_stack(args_base);
4346   assert(callee_state->stack_size() == 0, "callee stack must be empty");
4347 
4348   // Check if we need a membar at the beginning of the java.lang.Object
4349   // constructor to satisfy the memory model for strict fields.
4350   if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
4351     Value receiver = state()->local_at(0);
4352     ciType* klass = receiver->exact_type();
4353     if (klass == nullptr) {
4354       // No exact type, check if the declared type has no implementors and add a dependency
4355       klass = receiver->declared_type();
4356       klass = compilation()->cha_exact_type(klass);
4357     }
4358     if (klass != nullptr && klass->is_instance_klass()) {
4359       // Exact receiver type, check if there is a strict field
4360       ciInstanceKlass* holder = klass->as_instance_klass();
4361       for (int i = 0; i < holder->nof_nonstatic_fields(); i++) {
4362         ciField* field = holder->nonstatic_field_at(i);
4363         if (field->is_strict()) {
4364           // Found a strict field, a membar is needed
4365           append(new MemBar(lir_membar_storestore));
4366           break;
4367         }
4368       }
4369     } else if (klass == nullptr) {
4370       // We can't statically determine the type of the receiver and therefore need
4371       // to put a membar here because it could have a strict field.
4372       append(new MemBar(lir_membar_storestore));
4373     }
4374   }
4375 
4376   Value lock = nullptr;
4377   BlockBegin* sync_handler = nullptr;
4378 
4379   // Inline the locking of the receiver if the callee is synchronized
4380   if (callee->is_synchronized()) {
4381     lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
4382                                : state()->local_at(0);
4383     sync_handler = new BlockBegin(SynchronizationEntryBCI);
4384     inline_sync_entry(lock, sync_handler);
4385   }
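       // A static synchronized callee locks its holder's class mirror (a
       // constant); an instance method locks the receiver in local 0. The
       // sync_handler releases the lock if the inlinee throws.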
4386 
4387   if (compilation()->env()->dtrace_method_probes()) {
4388     Values* args = new Values(1);
4389     args->push(append(new Constant(new MethodConstant(method()))));
4390     append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
4391   }
4392 
4393   if (profile_inlined_calls()) {
4394     profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI));
4395   }