/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciSymbols.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvm_io.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/idealKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
extern uint explicit_null_checks_inserted,
            explicit_null_checks_elided;
#endif

Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
  // Feed unused profile data to type speculation
  if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
    ciKlass* array_type = nullptr;
    ciKlass* element_type = nullptr;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool flat_array = true;
    bool null_free_array = true;
    method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
    if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
      ld = record_profile_for_speculation(ld, element_type, element_ptr);
    }
  }
  return ld;
}


//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, 0, elemtype);
  if (stopped())  return;     // guaranteed null or range check

  Node* array_index = pop();
  Node* array = pop();

  // Handle inline type arrays
  const TypeOopPtr* element_ptr = elemtype->make_oopptr();
  const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
  if (array_type->is_flat()) {
    // Load from flat inline type array
    Node* inline_type;
    if (element_ptr->klass_is_exact()) {
      inline_type = InlineTypeNode::make_from_flat(this, elemtype->inline_klass(), array, adr);
    } else {
      // Element type of flat array is not exact. Therefore, we cannot determine the flat array layout statically.
      // Emit a runtime call to load the element from the flat array.
      inline_type = load_from_unknown_flat_array(array, array_index, element_ptr);
      inline_type = record_profile_for_speculation_at_array_load(inline_type);
    }
    push(inline_type);
    return;
  }

  if (!array_type->is_not_flat()) {
    // Cannot statically determine if array is a flat array, emit runtime check
    assert(UseFlatArray && is_reference_type(bt) && element_ptr->can_be_inline_type() && !array_type->is_not_null_free() &&
           (!element_ptr->is_inlinetypeptr() || element_ptr->inline_klass()->flat_in_array()), "array can't be flat");
    IdealKit ideal(this);
    IdealVariable res(ideal);
    ideal.declarations_done();
    ideal.if_then(flat_array_test(array, /* flat = */ false)); {
      // Non-flat array
      assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
      sync_kit(ideal);
      const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
      DecoratorSet decorator_set = IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD;
      if (needs_range_check(array_type->size(), array_index)) {
        // We've emitted a RangeCheck but now insert an additional check between the range check and the actual load.
        // We cannot pin the load to two separate nodes. Instead, we pin it conservatively here such that it cannot
        // possibly float above the range check at any point.
        decorator_set |= C2_UNKNOWN_CONTROL_LOAD;
      }
      Node* ld = access_load_at(array, adr, adr_type, element_ptr, bt, decorator_set);
      if (element_ptr->is_inlinetypeptr()) {
        assert(element_ptr->maybe_null(), "null free array should be handled above");
        ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass(), false);
      }
      ideal.sync_kit(this);
      ideal.set(res, ld);
    } ideal.else_(); {
      // Flat array
      sync_kit(ideal);
      if (element_ptr->is_inlinetypeptr()) {
        // Element type is known, cast and load from flat array layout.
        ciInlineKlass* vk = element_ptr->inline_klass();
        assert(vk->flat_in_array() && element_ptr->maybe_null(), "never/always flat - should be optimized");
        ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
        const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
        Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, arytype));
        Node* casted_adr = array_element_address(cast, array_index, T_OBJECT, array_type->size(), control());
        // Re-execute flat array load if buffering triggers deoptimization
        PreserveReexecuteState preexecs(this);
        jvms()->set_should_reexecute(true);
        inc_sp(2);
        Node* vt = InlineTypeNode::make_from_flat(this, vk, cast, casted_adr)->buffer(this, false);
        ideal.set(res, vt);
        ideal.sync_kit(this);
      } else {
        // Element type is unknown, and thus we cannot statically determine the exact flat array layout. Emit a
        // runtime call to correctly load the inline type element from the flat array.
        Node* inline_type = load_from_unknown_flat_array(array, array_index, element_ptr);
        ideal.sync_kit(this);
        ideal.set(res, inline_type);
      }
    } ideal.end_if();
    sync_kit(ideal);
    Node* ld = _gvn.transform(ideal.value(res));
    ld = record_profile_for_speculation_at_array_load(ld);
    push_node(bt, ld);
    return;
  }

  if (array_type->is_null_free()) {
    // Load from non-flat inline type array (elements can never be null)
    bt = T_OBJECT;
  }

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  }
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
                            IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  ld = record_profile_for_speculation_at_array_load(ld);
  // Loading an inline type from a non-flat array
  if (element_ptr != nullptr && element_ptr->is_inlinetypeptr()) {
    assert(!array_type->is_null_free() || !element_ptr->maybe_null(), "inline type array elements should never be null");
    ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass(), !element_ptr->maybe_null());
  }
  push_node(bt, ld);
}

Node* Parse::load_from_unknown_flat_array(Node* array, Node* array_index, const TypeOopPtr* element_ptr) {
  // Below membars keep this access to an unknown flat array correctly
  // ordered with other unknown and known flat array accesses.
  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

  Node* call = nullptr;
  {
    // Re-execute flat array load if runtime call triggers deoptimization
    PreserveReexecuteState preexecs(this);
    jvms()->set_bci(_bci);
    jvms()->set_should_reexecute(true);
    inc_sp(2);
    kill_dead_locals();
    call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                             OptoRuntime::load_unknown_inline_Type(),
                             OptoRuntime::load_unknown_inline_Java(),
                             nullptr, TypeRawPtr::BOTTOM,
                             array, array_index);
  }
  make_slow_call_ex(call, env()->Throwable_klass(), false);
  Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));

  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

  // Keep track of the information that the inline type is in flat arrays
  const Type* unknown_value = element_ptr->is_instptr()->cast_to_flat_in_array();
  return _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
}

//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, type2size[bt], elemtype);
  if (stopped())  return;     // guaranteed null or range check
  Node* stored_value_casted = nullptr;
  if (bt == T_OBJECT) {
    stored_value_casted = array_store_check(adr, elemtype);
    if (stopped()) {
      return;
    }
  }
  Node* const stored_value = pop_node(bt); // Value to store
  Node* const array_index = pop();         // Index in the array
  Node* array = pop();                     // The array itself

  const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = elemtype->make_oopptr();
    const Type* stored_value_casted_type = _gvn.type(stored_value_casted);
    // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
    // This is only legal for non-null stores because the array_store_check always passes for null, even
    // if the array is null-free. Null stores are handled in GraphKit::inline_array_null_guard().
    bool not_null_free = !stored_value_casted_type->maybe_null() &&
                         !stored_value_casted_type->is_oopptr()->can_be_inline_type();
    bool not_flat = not_null_free || (stored_value_casted_type->is_inlinetypeptr() &&
                                      !stored_value_casted_type->inline_klass()->flat_in_array());
    if (!array_type->is_not_null_free() && not_null_free) {
      // Storing a non-inline type, mark array as not null-free (-> not flat).
      array_type = array_type->cast_to_not_null_free();
      Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
      replace_in_map(array, cast);
      array = cast;
    } else if (!array_type->is_not_flat() && not_flat) {
      // Storing to a non-flat array, mark array as not flat.
      array_type = array_type->cast_to_not_flat();
      Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
      replace_in_map(array, cast);
      array = cast;
    }

    if (array_type->is_flat()) {
      // Store to flat inline type array
      assert(!stored_value_casted_type->maybe_null(), "should be guaranteed by array store check");
      if (array_type->klass_is_exact()) {
        // Store to exact flat inline type array where we know the flat array layout statically.
        // Re-execute flat array store if buffering triggers deoptimization
        PreserveReexecuteState preexecs(this);
        inc_sp(3);
        jvms()->set_should_reexecute(true);
        stored_value_casted->as_InlineType()->store_flat(this, array, adr, nullptr, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
      } else {
        // Element type of flat array is not exact. Therefore, we cannot determine the flat array layout statically.
        // Emit a runtime call to store the element to the flat array.
        store_to_unknown_flat_array(array, array_index, stored_value_casted);
      }
      return;
    }
    if (array_type->is_null_free()) {
      // Store to non-flat null-free inline type array (elements can never be null)
      assert(!stored_value_casted_type->maybe_null(), "should be guaranteed by array store check");
      if (elemtype->inline_klass()->is_empty()) {
        // Ignore empty inline stores, array is already initialized.
        return;
      }
    } else if (!array_type->is_not_flat() && (stored_value_casted_type != TypePtr::NULL_PTR || StressReflectiveCode)) {
      // Array might be a flat array, emit runtime checks (for nullptr, a simple inline_array_null_guard is sufficient).
      assert(UseFlatArray && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
             !array_type->klass_is_exact() && !array_type->is_not_null_free(), "array can't be a flat array");
      IdealKit ideal(this);
      ideal.if_then(flat_array_test(array, /* flat = */ false)); {
        // Non-flat array
        assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
        sync_kit(ideal);
        Node* cast_array = inline_array_null_guard(array, stored_value_casted, 3);
        inc_sp(3);
        access_store_at(cast_array, adr, adr_type, stored_value_casted, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
        dec_sp(3);
        ideal.sync_kit(this);
      } ideal.else_(); {
        sync_kit(ideal);
        // flat array
        Node* null_ctl = top();
        Node* null_checked_stored_value_casted = null_check_oop(stored_value_casted, &null_ctl);
        if (null_ctl != top()) {
          PreserveJVMState pjvms(this);
          inc_sp(3);
          set_control(null_ctl);
          uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
          dec_sp(3);
        }
        // Try to determine the inline klass
        ciInlineKlass* inline_Klass = nullptr;
        if (stored_value_casted_type->is_inlinetypeptr()) {
          inline_Klass = stored_value_casted_type->inline_klass();
        } else if (elemtype->is_inlinetypeptr()) {
          inline_Klass = elemtype->inline_klass();
        }
        if (!stopped()) {
          if (inline_Klass != nullptr) {
            // Element type is known, cast and store to flat array layout.
            assert(inline_Klass->flat_in_array() && elemtype->maybe_null(), "never/always flat - should be optimized");
            ciArrayKlass* array_klass = ciArrayKlass::make(inline_Klass, /* null_free */ true);
            const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
            Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, arytype));
            Node* casted_adr = array_element_address(casted_array, array_index, T_OBJECT, arytype->size(), control());
            if (!null_checked_stored_value_casted->is_InlineType()) {
              assert(!gvn().type(null_checked_stored_value_casted)->maybe_null(),
                     "inline type array elements should never be null");
              null_checked_stored_value_casted = InlineTypeNode::make_from_oop(this, null_checked_stored_value_casted,
                                                                               inline_Klass);
            }
            // Re-execute flat array store if buffering triggers deoptimization
            PreserveReexecuteState preexecs(this);
            inc_sp(3);
            jvms()->set_should_reexecute(true);
            null_checked_stored_value_casted->as_InlineType()->store_flat(this, casted_array, casted_adr, nullptr, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
          } else {
            // Element type is unknown, emit a runtime call since the flat array layout is not statically known.
            store_to_unknown_flat_array(array, array_index, null_checked_stored_value_casted);
          }
        }
        ideal.sync_kit(this);
      }
      ideal.end_if();
      sync_kit(ideal);
      return;
    } else if (!array_type->is_not_null_free()) {
      // Array is not flat but may be null free
      assert(elemtype->is_oopptr()->can_be_inline_type(), "array can't be null-free");
      array = inline_array_null_guard(array, stored_value_casted, 3, true);
    }
  }
  inc_sp(3);
  access_store_at(array, adr, adr_type, stored_value, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
  dec_sp(3);
}

// Emit a runtime call to store to a flat array whose element type is either unknown (i.e. we do not know the flat
// array layout) or not exact (could have different flat array layouts at runtime).
void Parse::store_to_unknown_flat_array(Node* array, Node* const idx, Node* non_null_stored_value) {
  // Below membars keep this access to an unknown flat array correctly
  // ordered with other unknown and known flat array accesses.
  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

  Node* call = nullptr;
  {
    // Re-execute flat array store if runtime call triggers deoptimization
    PreserveReexecuteState preexecs(this);
    jvms()->set_bci(_bci);
    jvms()->set_should_reexecute(true);
    inc_sp(3);
    kill_dead_locals();
    call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                      OptoRuntime::store_unknown_inline_Type(),
                      OptoRuntime::store_unknown_inline_Java(),
                      nullptr, TypeRawPtr::BOTTOM,
                      non_null_stored_value, array, idx);
  }
  make_slow_call_ex(call, env()->Throwable_klass(), false);

  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
}

//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
  Node *idx   = peek(0+vals);   // Get from stack without popping
  Node *ary   = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  elemtype = arytype->elem();

  if (UseUniqueSubclasses) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->instance_klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  if (!arytype->is_loaded()) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    ciKlass* klass = arytype->unloaded_klass();

    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  klass, "!loaded array");
    return top();
  }

  ary = create_speculative_inline_type_array_checks(ary, arytype, elemtype);

  if (needs_range_check(sizetype, idx)) {
    create_range_check(idx, ary, sizetype);
  } else if (C->log() != nullptr) {
    C->log()->elem("observe that='!need_range_check'");
  }

  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());
  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}

// Check if we need a range check for an array access. This is the case if the index is either negative or if it could
// be greater than or equal to the smallest possible array size (i.e. out-of-bounds).
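// For example, an index known to be in [0, 9] used on an array whose length is known to be at least 10 needs no
// range check.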
bool Parse::needs_range_check(const TypeInt* size_type, const Node* index) const {
  const TypeInt* index_type = _gvn.type(index)->is_int();
  return index_type->_hi >= size_type->_lo || index_type->_lo < 0;
}

void Parse::create_range_check(Node* idx, Node* ary, const TypeInt* sizetype) {
  Node* tst;
  if (sizetype->_hi <= 0) {
    // The greatest array bound is negative, so we can conclude that we're
    // compiling unreachable code, but the unsigned compare trick used below
    // only works with non-negative lengths.  Instead, hack "tst" to be zero so
    // the uncommon_trap path will always be taken.
    tst = _gvn.intcon(0);
  } else {
    // Range is constant in array-oop, so we can use the original state of mem
    Node* len = load_array_length(ary);

    // Test length vs index (standard trick using unsigned compare)
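    // A negative index reinterpreted as unsigned becomes a huge value, so the single
    // unsigned comparison idx <u len rejects both idx < 0 and idx >= len.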
    Node* chk = _gvn.transform(new CmpUNode(idx, len) );
    BoolTest::mask btest = BoolTest::lt;
    tst = _gvn.transform(new BoolNode(chk, btest) );
  }
  RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
  _gvn.set_type(rc, rc->Value(&_gvn));
  if (!tst->is_Con()) {
    record_for_igvn(rc);
  }
  set_control(_gvn.transform(new IfTrueNode(rc)));
  // Branch to failure if out of bounds
  {
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(new IfFalseNode(rc)));
    if (C->allow_range_check_smearing()) {
      // Do not use builtin_throw, since range checks are sometimes
      // made more stringent by an optimistic transformation.
      // This creates "tentative" range checks at this point,
      // which are not guaranteed to throw exceptions.
      // See IfNode::Ideal, is_range_check, adjust_check.
      uncommon_trap(Deoptimization::Reason_range_check,
                    Deoptimization::Action_make_not_entrant,
                    nullptr, "range_check");
    } else {
      // If we have already recompiled with the range-check-widening
      // heroic optimization turned off, then we must really be throwing
      // range check exceptions.
      builtin_throw(Deoptimization::Reason_range_check);
    }
  }
}

// For inline type arrays, we can use the profiling information for array accesses to speculate on the type, flatness,
// and null-freeness. We can either prepare the speculative type for later uses or emit explicit speculative checks with
// traps now. In the latter case, the speculative type guarantees can avoid additional runtime checks later (e.g.
// non-null-free implies non-flat which allows us to remove flatness checks). This makes the graph simpler.
Node* Parse::create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type,
                                                         const Type*& element_type) {
  if (!array_type->is_flat() && !array_type->is_not_flat()) {
    // For arrays that might be flat, speculate that the array has the exact type reported in the profile data such that
    // we can rely on a fixed memory layout (i.e. either a flat layout or not).
    array = cast_to_speculative_array_type(array, array_type, element_type);
  } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
    // Array is known to be either flat or not flat. If possible, update the speculative type by using the profile data
    // at this bci.
    array = cast_to_profiled_array_type(array);
  }

  // Even though the type does not tell us whether we have an inline type array or not, we can still check the profile
  // data to see whether we have a non-null-free or non-flat array. Since non-null-free implies non-flat, we check this
  // first. Speculating on a non-null-free array doesn't help aaload but could be profitable for a subsequent aastore.
  if (!array_type->is_null_free() && !array_type->is_not_null_free()) {
    array = speculate_non_null_free_array(array, array_type);
  }

  if (!array_type->is_flat() && !array_type->is_not_flat()) {
    array = speculate_non_flat_array(array, array_type);
  }
  return array;
}

// Speculate that the array has the exact type reported in the profile data. We emit a trap when this turns out to be
// wrong. On the fast path, we add a CheckCastPP to use the exact type.
Node* Parse::cast_to_speculative_array_type(Node* const array, const TypeAryPtr*& array_type, const Type*& element_type) {
  Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
  ciKlass* speculative_array_type = array_type->speculative_type();
  if (too_many_traps_or_recompiles(reason) || speculative_array_type == nullptr) {
    // No speculative type, check profile data at this bci
    speculative_array_type = nullptr;
    reason = Deoptimization::Reason_class_check;
    if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
      ciKlass* profiled_element_type = nullptr;
      ProfilePtrKind element_ptr = ProfileMaybeNull;
      bool flat_array = true;
      bool null_free_array = true;
      method()->array_access_profiled_type(bci(), speculative_array_type, profiled_element_type, element_ptr, flat_array,
                                           null_free_array);
    }
  }
  if (speculative_array_type != nullptr) {
    // Speculate that this array has the exact type reported by profile data
    Node* casted_array = nullptr;
    DEBUG_ONLY(Node* old_control = control();)
    Node* slow_ctl = type_check_receiver(array, speculative_array_type, 1.0, &casted_array);
    if (stopped()) {
      // The check always fails and therefore profile information is incorrect. Don't use it.
      assert(old_control == slow_ctl, "type check should have been removed");
      set_control(slow_ctl);
    } else if (!slow_ctl->is_top()) {
      { PreserveJVMState pjvms(this);
        set_control(slow_ctl);
        uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
      }
      replace_in_map(array, casted_array);
      array_type = _gvn.type(casted_array)->is_aryptr();
      element_type = array_type->elem();
      return casted_array;
    }
  }
  return array;
}

// Create a CheckCastPP when the speculative type can improve the current type.
Node* Parse::cast_to_profiled_array_type(Node* const array) {
  ciKlass* array_type = nullptr;
  ciKlass* element_type = nullptr;
  ProfilePtrKind element_ptr = ProfileMaybeNull;
  bool flat_array = true;
  bool null_free_array = true;
  method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
  if (array_type != nullptr) {
    return record_profile_for_speculation(array, array_type, ProfileMaybeNull);
  }
  return array;
}

// Speculate that the array is non-null-free. This will imply non-flatness. We emit a trap when this turns out to be
// wrong. On the fast path, we add a CheckCastPP to use the non-null-free type.
Node* Parse::speculate_non_null_free_array(Node* const array, const TypeAryPtr*& array_type) {
  bool null_free_array = true;
  Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
  if (array_type->speculative() != nullptr &&
      array_type->speculative()->is_aryptr()->is_not_null_free() &&
      !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
    null_free_array = false;
    reason = Deoptimization::Reason_speculate_class_check;
  } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
    ciKlass* profiled_array_type = nullptr;
    ciKlass* profiled_element_type = nullptr;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool flat_array = true;
    method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
                                         null_free_array);
    reason = Deoptimization::Reason_class_check;
  }
  if (!null_free_array) {
    { // Deoptimize if null-free array
      BuildCutout unless(this, null_free_array_test(array, /* null_free = */ false), PROB_MAX);
      uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
    }
    assert(!stopped(), "null-free array should have been caught earlier");
    Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_null_free()));
    replace_in_map(array, casted_array);
    array_type = _gvn.type(casted_array)->is_aryptr();
    return casted_array;
  }
  return array;
}

// Speculate that the array is non-flat. We emit a trap when this turns out to be wrong. On the fast path, we add a
// CheckCastPP to use the non-flat type.
Node* Parse::speculate_non_flat_array(Node* const array, const TypeAryPtr* const array_type) {
  bool flat_array = true;
  Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
  if (array_type->speculative() != nullptr &&
      array_type->speculative()->is_aryptr()->is_not_flat() &&
      !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
    flat_array = false;
    reason = Deoptimization::Reason_speculate_class_check;
  } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
    ciKlass* profiled_array_type = nullptr;
    ciKlass* profiled_element_type = nullptr;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool null_free_array = true;
    method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
                                         null_free_array);
    reason = Deoptimization::Reason_class_check;
  }
  if (!flat_array) {
    { // Deoptimize if flat array
      BuildCutout unless(this, flat_array_test(array, /* flat = */ false), PROB_MAX);
      uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
    }
    assert(!stopped(), "flat array should have been caught earlier");
    Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_flat()));
    replace_in_map(array, casted_array);
    return casted_array;
  }
  return array;
}

// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
  Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
  IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
  return iff;
}


// sentinel value for the target bci to mark never taken branches
// (according to profiling)
static const int never_reached = INT_MAX;

//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    nullptr,
                    "taken always");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      merge_new_path(dest_bci_if_true);
    }
  }

  // False branch
  Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse  = _gvn.transform( new IfFalseNode (iff) );
    set_control( iffalse );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    nullptr,
                    "taken never");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      merge_new_path(dest_bci_if_true);
    }
  }

  // False branch
  Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, bool unc) {
  // False branch, use existing map and control()
  if (unc) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  nullptr,
                  "taken never");
  } else {
    assert(dest_bci != never_reached, "inconsistent dest");
    merge_new_path(dest_bci);
  }
}

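// Comparator for the qsort() calls below. It is wrapped in extern "C" because
// qsort() expects a comparison function with C linkage.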
extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;                     // inclusive lower limit
  jint _hi;                     // inclusive upper limit
  int _dest;
  float _cnt;                   // how many times this range was hit according to profiling

public:
  jint lo() const              { return _lo;   }
  jint hi() const              { return _hi;   }
  int  dest() const            { return _dest; }
  bool is_singleton() const    { return _lo == _hi; }
  float cnt() const            { return _cnt; }

  void setRange(jint lo, jint hi, int dest, float cnt) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo; _hi = hi; _dest = dest; _cnt = cnt;
    assert(_cnt >= 0, "");
  }
  bool adjoinRange(jint lo, jint hi, int dest, float cnt, bool trim_ranges) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1) {
      // see merge_ranges() comment below
      if (trim_ranges) {
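        // Merge only if both ranges were never hit (building a longer unreached range,
        // demoting the destination to never_reached if the destinations differ) or if
        // both were hit and jump to the same destination.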
        if (cnt == 0) {
          if (_cnt != 0) {
            return false;
          }
          if (dest != _dest) {
            _dest = never_reached;
          }
        } else {
          if (_cnt == 0) {
            return false;
          }
          if (dest != _dest) {
            return false;
          }
        }
      } else {
        if (dest != _dest) {
          return false;
        }
      }
      _hi = hi;
      _cnt += cnt;
      return true;
    }
    return false;
  }

  void set (jint value, int dest, float cnt) {
    setRange(value, value, dest, cnt);
  }
  bool adjoin(jint value, int dest, float cnt, bool trim_ranges) {
    return adjoinRange(value, value, dest, cnt, trim_ranges);
  }
  bool adjoin(SwitchRange& other) {
    return adjoinRange(other._lo, other._hi, other._dest, other._cnt, false);
  }

  void print() {
    if (is_singleton())
      tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
    else
      tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
  }
};

// We try to minimize the number of ranges and the size of the taken
// ones using profiling data. When ranges are created,
// SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
// if both were never hit or both were hit to build longer unreached
// ranges. Here, we now merge adjoining ranges with the same
// destination and finally set destination of unreached ranges to the
// special value never_reached because it can help minimize the number
// of tests that are necessary.
//
// For instance:
// [0, 1] to target1 sometimes taken
// [1, 2] to target1 never taken
// [2, 3] to target2 never taken
// would lead to:
// [0, 1] to target1 sometimes taken
// [1, 3] never taken
//
// (first 2 ranges to target1 are not merged)
static void merge_ranges(SwitchRange* ranges, int& rp) {
  if (rp == 0) {
    return;
  }
  int shift = 0;
  for (int j = 0; j < rp; j++) {
    SwitchRange& r1 = ranges[j-shift];
    SwitchRange& r2 = ranges[j+1];
    if (r1.adjoin(r2)) {
      shift++;
    } else if (shift > 0) {
      ranges[j+1-shift] = r2;
    }
  }
  rp -= shift;
  for (int j = 0; j <= rp; j++) {
    SwitchRange& r = ranges[j];
    if (r.cnt() == 0 && r.dest() != never_reached) {
      r.setRange(r.lo(), r.hi(), never_reached, r.cnt());
    }
  }
}

//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  jint lo_index    = iter().get_int_table(1);
  jint hi_index    = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    pop(); // the effect of the instruction execution on the operand stack
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = nullptr;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != nullptr && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  int rnum = len+2;
  bool makes_backward_branch = (default_dest <= bci());
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    float cnt = 1.0F;
    if (profile != nullptr) {
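      // Split the default count in half when a second outlier range above hi_index
      // will be created as well.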
      cnt = (float)profile->default_count() / (hi_index != max_jint ? 2.0F : 1.0F);
    }
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, cnt);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    float cnt = 1.0F;
    if (profile != nullptr) {
      cnt = (float)profile->count_at(j);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, cnt, trim_ranges)) {
      ranges[++rp].set(match_int, dest, cnt);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint) {
    float cnt = 1.0F;
    if (profile != nullptr) {
      cnt = (float)profile->default_count() / (lo_index != min_jint ? 2.0F : 1.0F);
    }
    if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, cnt, trim_ranges)) {
      ranges[++rp].setRange(highest+1, max_jint, default_dest, cnt);
    }
  }
  assert(rp < len+2, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case a backward branch is observed
  if (makes_backward_branch) {
    add_safepoint();
  }

  Node* lookup = pop(); // lookup value
  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}


//------------------------------do_lookupswitch--------------------------------
void Parse::do_lookupswitch() {
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  jint len          = iter().get_int_table(1);

  if (len < 1) {    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    pop(); // the effect of the instruction execution on the operand stack
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = nullptr;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != nullptr && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
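  // Each row of the table holds {match value, destination bci, profiled count}.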
  {
    for (int j = 0; j < len; j++) {
      table[3*j+0] = iter().get_int_table(2+2*j);
      table[3*j+1] = iter().get_dest_table(2+2*j+1);
      // Handle overflow when converting from uint to jint
      table[3*j+2] = (profile == nullptr) ? 1 : (jint)MIN2<uint>((uint)max_jint, profile->count_at(j));
    }
    qsort(table, len, 3*sizeof(table[0]), jint_cmp);
  }

  float default_cnt = 1.0F;
  if (profile != nullptr) {
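    // Spread the default count evenly over all int values that no case matches.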
    juint defaults = max_juint - len;
    default_cnt = (float)profile->default_count()/(float)defaults;
  }

  int rnum = len*2+1;
  bool makes_backward_branch = (default_dest <= bci());
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for (int j = 0; j < len; j++) {
    jint match_int   = table[3*j+0];
    jint  dest        = table[3*j+1];
    jint  cnt         = table[3*j+2];
    jint  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
    makes_backward_branch |= (dest <= bci());
    float c = default_cnt * ((float)match_int - (float)next_lo);
    if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, c, trim_ranges))) {
      assert(default_dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, c);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, (float)cnt, trim_ranges)) {
      assert(dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].set(match_int, dest,  (float)cnt);
    }
  }
  jint highest = table[3*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint &&
      !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest), trim_ranges)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest));
  }
  assert(rp < rnum, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case backward branch observed
  if (makes_backward_branch) {
    add_safepoint();
  }

  Node *lookup = pop(); // lookup value
  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}

static float if_prob(float taken_cnt, float total_cnt) {
  assert(taken_cnt <= total_cnt, "");
  if (total_cnt == 0) {
    return PROB_FAIR;
  }
  float p = taken_cnt / total_cnt;
  return clamp(p, PROB_MIN, PROB_MAX);
}

static float if_cnt(float cnt) {
  if (cnt == 0) {
    return COUNT_UNKNOWN;
  }
  return cnt;
}

static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
  float total_cnt = 0;
  for (SwitchRange* sr = lo; sr <= hi; sr++) {
    total_cnt += sr->cnt();
  }
  return total_cnt;
}

class SwitchRanges : public ResourceObj {
public:
  SwitchRange* _lo;
  SwitchRange* _hi;
  SwitchRange* _mid;
  float _cost;

  enum {
    Start,
    LeftDone,
    RightDone,
    Done
  } _state;

  SwitchRanges(SwitchRange *lo, SwitchRange *hi)
    : _lo(lo), _hi(hi), _mid(nullptr),
      _cost(0), _state(Start) {
  }

  SwitchRanges()
    : _lo(nullptr), _hi(nullptr), _mid(nullptr),
      _cost(0), _state(Start) {}
};

// Estimate cost of performing a binary search on lo..hi
static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
  GrowableArray<SwitchRanges> tree;
  SwitchRanges root(lo, hi);
  tree.push(root);

  float cost = 0;
  do {
    SwitchRanges& r = *tree.adr_at(tree.length()-1);
    if (r._hi != r._lo) {
      if (r._mid == nullptr) {
        float r_cnt = sum_of_cnts(r._lo, r._hi);

        if (r_cnt == 0) {
          tree.pop();
          cost = 0;
          continue;
        }

        SwitchRange* mid = nullptr;
        mid = r._lo;
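        // Advance mid to the first range at which the cumulative count exceeds half
        // of the subtree's total, so both halves of the split are about equally hot.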
        for (float cnt = 0; ; ) {
          assert(mid <= r._hi, "out of bounds");
          cnt += mid->cnt();
          if (cnt > r_cnt / 2) {
            break;
          }
          mid++;
        }
        assert(mid <= r._hi, "out of bounds");
        r._mid = mid;
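        // Reaching this node costs one comparison, weighted by the probability of
        // getting here at all.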
        r._cost = r_cnt / total_cnt;
      }
      r._cost += cost;
      if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
        cost = 0;
        r._state = SwitchRanges::LeftDone;
        tree.push(SwitchRanges(r._lo, r._mid-1));
      } else if (r._state < SwitchRanges::RightDone) {
        cost = 0;
        r._state = SwitchRanges::RightDone;
        tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
      } else {
        tree.pop();
        cost = r._cost;
      }
    } else {
      tree.pop();
      cost = r._cost;
    }
  } while (tree.length() > 0);


  return cost;
}

// It sometimes pays off to test most common ranges before the binary search
void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
  uint nr = hi - lo + 1;
  float total_cnt = sum_of_cnts(lo, hi);

  float min = compute_tree_cost(lo, hi, total_cnt);
  float extra = 1;
  float sub = 0;

  SwitchRange* array1 = lo;
  SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);

  SwitchRange* ranges = nullptr;

  while (nr >= 2) {
    assert(lo == array1 || lo == array2, "one of the 2 already allocated arrays");
    ranges = (lo == array1) ? array2 : array1;

    // Find highest frequency range
    SwitchRange* candidate = lo;
    for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
      if (sr->cnt() > candidate->cnt()) {
        candidate = sr;
      }
    }
    SwitchRange most_freq = *candidate;
    if (most_freq.cnt() == 0) {
      break;
    }

    // Copy remaining ranges into another array
    int shift = 0;
    for (uint i = 0; i < nr; i++) {
      SwitchRange* sr = &lo[i];
      if (sr != candidate) {
        ranges[i-shift] = *sr;
      } else {
        shift++;
        if (i > 0 && i < nr-1) {
          SwitchRange prev = lo[i-1];
          prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.cnt());
          if (prev.adjoin(lo[i+1])) {
            shift++;
            i++;
          }
          ranges[i-shift] = prev;
        }
      }
    }
    nr -= shift;

    // Evaluate cost of testing the most common range and performing a
    // binary search on the other ranges
    float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
    if (cost >= min) {
      break;
    }
    // swap arrays
    lo = &ranges[0];
    hi = &ranges[nr-1];

    // It pays off: emit the test for the most common range
    assert(most_freq.cnt() > 0, "must be taken");
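    // A single unsigned comparison tests membership in [lo(), hi()]:
    // (juint)(key_val - lo()) <=u (juint)(hi() - lo()).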
    Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
    Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(java_subtract(most_freq.hi(), most_freq.lo()))));
    Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
    IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
    jump_if_true_fork(iff, most_freq.dest(), false);

    sub += most_freq.cnt() / total_cnt;
    extra += 1 - sub;
    min = cost;
  }
}

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jumptables enabled
  if (!UseJumpTables)  return false;

  // Are jumptables supported
  if (!Matcher::has_match_rule(Op_Jump))  return false;

  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set. We'll call this the default target
  // even though we can't be sure that it is the true "default".

  bool needs_guard = false;
  int default_dest;
  int64_t total_outlier_size = 0;
  int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
  int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  float total = sum_of_cnts(lo, hi);
  float cost = compute_tree_cost(lo, hi, total);

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  float trimmed_cnt = 0;
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest()) {
      trimmed_cnt += lo->cnt();
      lo++;
    }
    if (default_dest == hi->dest()) {
      trimmed_cnt += hi->cnt();
      hi--;
    }
  }

  // Find the total number of cases and ranges
  int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases > MaxJumpTableSize)
    return false;
  if (UseSwitchProfiling) {
    // MinJumpTableSize is set so that, with a well-balanced binary tree,
    // when the number of ranges is MinJumpTableSize, it's cheaper to
    // go through a JumpNode than a tree of IfNodes. Average cost of a
    // tree of IfNodes with MinJumpTableSize ranges is
    // log2f(MinJumpTableSize) comparisons. So if the cost computed
    // from profile data is less than log2f(MinJumpTableSize) then
    // going with the binary search is cheaper.
1232     if (cost < log2f(MinJumpTableSize)) {
1233       return false;
1234     }
1235   } else {
1236     if (num_cases < MinJumpTableSize)
1237       return false;
1238   }
1239   if (num_cases > (MaxJumpTableSparseness * num_range))
1240     return false;
1241 
1242   // Normalize table lookups to zero
1243   int lowval = lo->lo();
1244   key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );
1245 
1246   // Generate a guard to protect against input keyvals that aren't
1247   // in the switch domain.
1248   if (needs_guard) {
1249     Node*   size = _gvn.intcon(num_cases);
1250     Node*   cmp = _gvn.transform(new CmpUNode(key_val, size));
1251     Node*   tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
1252     IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
1253     jump_if_true_fork(iff, default_dest, trim_ranges && trimmed_cnt == 0);
1254 
1255     total -= trimmed_cnt;
1256   }
1257 
1258   // Create an ideal node JumpTable that has projections
1259   // of all possible ranges for a switch statement
1260   // The key_val input must be converted to a pointer offset and scaled.
1261   // Compare Parse::array_addressing above.
1262 
1263   // Clean the 32-bit int into a real 64-bit offset.
1264   // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
1265   // Make I2L conversion control dependent to prevent it from
1266   // floating above the range check during loop optimizations.
1267   // Do not use a narrow int type here to prevent the data path from dying
1268   // while the control path is not removed. This can happen if the type of key_val
1269   // is later known to be out of bounds of [0, num_cases] and therefore a narrow cast
1270   // would be replaced by TOP while C2 is not able to fold the corresponding range checks.
1271   // Set _carry_dependency for the cast to avoid being removed by IGVN.
1272 #ifdef _LP64
1273   key_val = C->constrained_convI2L(&_gvn, key_val, TypeInt::INT, control(), true /* carry_dependency */);
1274 #endif
1275 
1276   // Shift the value by wordsize so we have an index into the table, rather
1277   // than a switch value
1278   Node *shiftWord = _gvn.MakeConX(wordSize);
1279   key_val = _gvn.transform( new MulXNode( key_val, shiftWord));
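  // (On a 64-bit VM wordSize is 8, so case index i maps to byte offset
  // i * 8 within the jump table.)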
1280 
1281   // Create the JumpNode
1282   Arena* arena = C->comp_arena();
1283   float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
1284   int i = 0;
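  // Distribute probabilities over the individual case values: with no
  // profile data (total == 0) every case gets 1/num_cases; otherwise each
  // value in a range gets an equal share of that range's frequency.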
1285   if (total == 0) {
1286     for (SwitchRange* r = lo; r <= hi; r++) {
1287       for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1288         probs[i] = 1.0F / num_cases;
1289       }
1290     }
1291   } else {
1292     for (SwitchRange* r = lo; r <= hi; r++) {
1293       float prob = r->cnt()/total;
1294       for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1295         probs[i] = prob / (r->hi() - r->lo() + 1);
1296       }
1297     }
1298   }
1299 
1300   ciMethodData* methodData = method()->method_data();
1301   ciMultiBranchData* profile = nullptr;
1302   if (methodData->is_mature()) {
1303     ciProfileData* data = methodData->bci_to_data(bci());
1304     if (data != nullptr && data->is_MultiBranchData()) {
1305       profile = (ciMultiBranchData*)data;
1306     }
1307   }
1308 
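  // 'probs' supplies a per-case probability for the JumpProjNodes created
  // below; the count is COUNT_UNKNOWN unless a mature MultiBranchData
  // profile was found above.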
1309   Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == nullptr ? COUNT_UNKNOWN : total));
1310 
1311   // These are the switch destinations hanging off the jumpnode
1312   i = 0;
1313   for (SwitchRange* r = lo; r <= hi; r++) {
1314     for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1315       Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
1316       {
1317         PreserveJVMState pjvms(this);
1318         set_control(input);
1319         jump_if_always_fork(r->dest(), trim_ranges && r->cnt() == 0);
1320       }
1321     }
1322   }
1323   assert(i == num_cases, "miscount of cases");
1324   stop_and_kill_map();  // no more uses for this JVMS
1325   return true;
1326 }
1327 
1328 //----------------------------jump_switch_ranges-------------------------------
1329 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
1330   Block* switch_block = block();
1331   bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
1332 
1333   if (switch_depth == 0) {
1334     // Do special processing for the top-level call.
1335     assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
1336     assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
1337 
1338     // Decrement pred-numbers for the unique set of nodes.
1339 #ifdef ASSERT
1340     if (!trim_ranges) {
1341       // Ensure that the block's successors are a (duplicate-free) set.
1342       int successors_counted = 0;  // block occurrences in [lo..hi]
1343       int unique_successors = switch_block->num_successors();
1344       for (int i = 0; i < unique_successors; i++) {
1345         Block* target = switch_block->successor_at(i);
1346 
1347         // Check that the set of successors is the same in both places.
1348         int successors_found = 0;
1349         for (SwitchRange* p = lo; p <= hi; p++) {
1350           if (p->dest() == target->start())  successors_found++;
1351         }
1352         assert(successors_found > 0, "successor must be known");
1353         successors_counted += successors_found;
1354       }
1355       assert(successors_counted == (hi-lo)+1, "no unexpected successors");
1356     }
1357 #endif
1358 
1359     // Maybe prune the inputs, based on the type of key_val.
1360     jint min_val = min_jint;
1361     jint max_val = max_jint;
1362     const TypeInt* ti = key_val->bottom_type()->isa_int();
1363     if (ti != nullptr) {
1364       min_val = ti->_lo;
1365       max_val = ti->_hi;
1366       assert(min_val <= max_val, "invalid int type");
1367     }
1368     while (lo->hi() < min_val) {
1369       lo++;
1370     }
1371     if (lo->lo() < min_val)  {
1372       lo->setRange(min_val, lo->hi(), lo->dest(), lo->cnt());
1373     }
1374     while (hi->lo() > max_val) {
1375       hi--;
1376     }
1377     if (hi->hi() > max_val) {
1378       hi->setRange(hi->lo(), max_val, hi->dest(), hi->cnt());
1379     }
1380 
1381     linear_search_switch_ranges(key_val, lo, hi);
1382   }
1383 
1384 #ifndef PRODUCT
1385   if (switch_depth == 0) {
1386     _max_switch_depth = 0;
1387     _est_switch_depth = log2i_graceful((hi - lo + 1) - 1) + 1;
1388   }
1389 #endif
1390 
1391   assert(lo <= hi, "must be a non-empty set of ranges");
1392   if (lo == hi) {
1393     jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1394   } else {
1395     assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
1396     assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
1397 
1398     if (create_jump_tables(key_val, lo, hi)) return;
1399 
1400     SwitchRange* mid = nullptr;
1401     float total_cnt = sum_of_cnts(lo, hi);
1402 
1403     int nr = hi - lo + 1;
1404     if (UseSwitchProfiling) {
1405       // Don't keep the binary search tree balanced: pick a mid point
1406       // that splits the frequencies in half.
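      // (E.g. with range counts {1, 1, 10, 1}, the cumulative count first
      // reaches total/2 == 6.5 at the hot third range, which then becomes
      // the pivot even though it is off-center.)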
1407       float cnt = 0;
1408       for (SwitchRange* sr = lo; sr <= hi; sr++) {
1409         cnt += sr->cnt();
1410         if (cnt >= total_cnt / 2) {
1411           mid = sr;
1412           break;
1413         }
1414       }
1415     } else {
1416       mid = lo + nr/2;
1417 
1418       // if there is an easy choice, pivot at a singleton:
1419       if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;
1420 
1421       assert(lo < mid && mid <= hi, "good pivot choice");
1422       assert(nr != 2 || mid == hi,   "should pick higher of 2");
1423       assert(nr != 3 || mid == hi-1, "should pick middle of 3");
1424     }
1425 
1426 
1427     Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());
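    // (When mid == lo there is nothing below the pivot, so the upper bound
    // of the lowest range is tested instead, with BoolTest::gt below.)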
1428 
1429     if (mid->is_singleton()) {
1430       IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
1431       jump_if_false_fork(iff_ne, mid->dest(), trim_ranges && mid->cnt() == 0);
1432 
1433       // Special Case:  If there are exactly three ranges, and the high
1434       // and low range each go to the same place, omit the "gt" test,
1435       // since it will not discriminate anything.
1436       bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;
1437 
1438       // if there is a higher range, test for it and process it:
1439       if (mid < hi && !eq_test_only) {
1440         // two comparisons of same values--should enable 1 test for 2 branches
1441         // Use BoolTest::lt instead of BoolTest::gt
1442         float cnt = sum_of_cnts(lo, mid-1);
1443         IfNode *iff_lt  = jump_if_fork_int(key_val, test_val, BoolTest::lt, if_prob(cnt, total_cnt), if_cnt(cnt));
1444         Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_lt) );
1445         Node   *iffalse = _gvn.transform( new IfFalseNode(iff_lt) );
1446         { PreserveJVMState pjvms(this);
1447           set_control(iffalse);
1448           jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
1449         }
1450         set_control(iftrue);
1451       }
1452 
1453     } else {
1454       // mid is a range, not a singleton, so treat mid..hi as a unit
1455       float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
1456       IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));
1457 
1458       // if there is a higher range, test for it and process it:
1459       if (mid == hi) {
1460         jump_if_true_fork(iff_ge, mid->dest(), trim_ranges && cnt == 0);
1461       } else {
1462         Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
1463         Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
1464         { PreserveJVMState pjvms(this);
1465           set_control(iftrue);
1466           jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1);
1467         }
1468         set_control(iffalse);
1469       }
1470     }
1471 
1472     // in any case, process the lower range
1473     if (mid == lo) {
1474       if (mid->is_singleton()) {
1475         jump_switch_ranges(key_val, lo+1, hi, switch_depth+1);
1476       } else {
1477         jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1478       }
1479     } else {
1480       jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
1481     }
1482   }
1483 
1484   // Decrease pred_count for each successor after all is done.
1485   if (switch_depth == 0) {
1486     int unique_successors = switch_block->num_successors();
1487     for (int i = 0; i < unique_successors; i++) {
1488       Block* target = switch_block->successor_at(i);
1489       // Throw away the pre-allocated path for each unique successor.
1490       target->next_path_num();
1491     }
1492   }
1493 
1494 #ifndef PRODUCT
1495   _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
1496   if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
1497     SwitchRange* r;
1498     int nsing = 0;
1499     for( r = lo; r <= hi; r++ ) {
1500       if( r->is_singleton() )  nsing++;
1501     }
1502     tty->print(">>> ");
1503     _method->print_short_name();
1504     tty->print_cr(" switch decision tree");
1505     tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
1506                   (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
1507     if (_max_switch_depth > _est_switch_depth) {
1508       tty->print_cr("******** BAD SWITCH DEPTH ********");
1509     }
1510     tty->print("   ");
1511     for( r = lo; r <= hi; r++ ) {
1512       r->print();
1513     }
1514     tty->cr();
1515   }
1516 #endif
1517 }
1518 
1519 void Parse::modf() {
1520   Node *f2 = pop();
1521   Node *f1 = pop();
1522   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
1523                               CAST_FROM_FN_PTR(address, SharedRuntime::frem),
1524                               "frem", nullptr, //no memory effects
1525                               f1, f2);
1526   Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
1527 
1528   push(res);
1529 }
1530 
1531 void Parse::modd() {
1532   Node *d2 = pop_pair();
1533   Node *d1 = pop_pair();
1534   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
1535                               CAST_FROM_FN_PTR(address, SharedRuntime::drem),
1536                               "drem", nullptr, //no memory effects
1537                               d1, top(), d2, top());
1538   Node* res_d   = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
1539 
1540 #ifdef ASSERT
1541   Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1));
1542   assert(res_top == top(), "second value must be top");
1543 #endif
1544 
1545   push_pair(res_d);
1546 }
1547 
1548 void Parse::l2f() {
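  // The jlong argument occupies two stack slots; pop both halves and pass
  // them to the runtime stub (the names mirror modf above).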
1549   Node* f2 = pop();
1550   Node* f1 = pop();
1551   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
1552                               CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
1553                               "l2f", nullptr, //no memory effects
1554                               f1, f2);
1555   Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
1556 
1557   push(res);
1558 }
1559 
1560 // Handle jsr and jsr_w bytecode
1561 void Parse::do_jsr() {
1562   assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
1563 
1564   // Store information about current state, tagged with new _jsr_bci
1565   int return_bci = iter().next_bci();
1566   int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
1567 
1568   // The way we do things now, there is only one successor block
1569   // for the jsr, because the target code is cloned by ciTypeFlow.
1570   Block* target = successor_for_bci(jsr_bci);
1571 
1572   // What got pushed?
1573   const Type* ret_addr = target->peek();
1574   assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
1575 
1576   // Effect of jsr on the stack
1577   push(_gvn.makecon(ret_addr));
1578 
1579   // Flow to the jsr.
1580   merge(jsr_bci);
1581 }
1582 
1583 // Handle ret bytecode
1584 void Parse::do_ret() {
1585   // Find out to whom we return.
1586   assert(block()->num_successors() == 1, "a ret can only go one place now");
1587   Block* target = block()->successor_at(0);
1588   assert(!target->is_ready(), "our arrival must be expected");
1589   int pnum = target->next_path_num();
1590   merge_common(target, pnum);
1591 }
1592 
1593 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
1594   if (btest != BoolTest::eq && btest != BoolTest::ne) {
1595     // Only ::eq and ::ne are supported for profile injection.
1596     return false;
1597   }
1598   if (test->is_Cmp() &&
1599       test->in(1)->Opcode() == Op_ProfileBoolean) {
1600     ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
1601     int false_cnt = profile->false_count();
1602     int  true_cnt = profile->true_count();
1603 
1604     // How the counts map to taken/not_taken depends on the actual test operation (::eq or ::ne).
1605     // No need to scale the counts because profile injection was designed
1606     // to feed exact counts into VM.
1607     taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
1608     not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;
1609 
1610     profile->consume();
1611     return true;
1612   }
1613   return false;
1614 }
1615 
1616 // Give up if there are too few (or too many, in which case the sum would overflow) counts to be meaningful.
1617 // We check first that each counter is non-negative, since a saturated (negative) counter could make the sum look valid.
1618 // (check for saturation, integer overflow, and immature counts)
1619 static bool counters_are_meaningful(int counter1, int counter2, int min) {
1620   // check for saturation, including "uint" values too big to fit in "int"
1621   if (counter1 < 0 || counter2 < 0) {
1622     return false;
1623   }
1624   // check for integer overflow of the sum
1625   int64_t sum = (int64_t)counter1 + (int64_t)counter2;
1626   STATIC_ASSERT(sizeof(counter1) < sizeof(sum));
1627   if (sum > INT_MAX) {
1628     return false;
1629   }
1630   // check if mature
1631   return (counter1 + counter2) >= min;
1632 }
1633 
1634 //--------------------------dynamic_branch_prediction--------------------------
1635 // Try to gather dynamic branch prediction behavior.  Return a probability
1636 // of the branch being taken and set the "cnt" field.  Returns
1637 // PROB_UNKNOWN (-1.0) if we need to use static prediction for some reason.
1638 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
1639   ResourceMark rm;
1640 
1641   cnt  = COUNT_UNKNOWN;
1642 
1643   int     taken = 0;
1644   int not_taken = 0;
1645 
1646   bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
1647 
1648   if (use_mdo) {
1649     // Use MethodData information if it is available
1650     // FIXME: free the ProfileData structure
1651     ciMethodData* methodData = method()->method_data();
1652     if (!methodData->is_mature())  return PROB_UNKNOWN;
1653     ciProfileData* data = methodData->bci_to_data(bci());
1654     if (data == nullptr) {
1655       return PROB_UNKNOWN;
1656     }
1657     if (!data->is_JumpData())  return PROB_UNKNOWN;
1658 
1659     // get taken and not taken values
1660     // NOTE: saturated UINT_MAX values become negative,
1661     // as do counts above INT_MAX.
1662     taken = data->as_JumpData()->taken();
1663     not_taken = 0;
1664     if (data->is_BranchData()) {
1665       not_taken = data->as_BranchData()->not_taken();
1666     }
1667 
1668     // scale the counts to be commensurate with invocation counts:
1669     // NOTE: overflow for positive values is clamped at INT_MAX
1670     taken = method()->scale_count(taken);
1671     not_taken = method()->scale_count(not_taken);
1672   }
1673   // At this point, saturation or overflow is indicated by INT_MAX
1674   // or a negative value.
1675 
1676   // Give up if there are too few (or too many, in which case the sum would overflow) counts to be meaningful.
1677   // We check first that each counter is non-negative, since a saturated (negative) counter could make the sum look valid.
1678   if (!counters_are_meaningful(taken, not_taken, 40)) {
1679     if (C->log() != nullptr) {
1680       C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
1681     }
1682     return PROB_UNKNOWN;
1683   }
1684 
1685   // Compute frequency that we arrive here
1686   float sum = taken + not_taken;
1687   // Adjust, if this block is a cloned private block but the
1688   // Jump counts are shared.  Take the private counts for
1689   // just this path instead of the shared counts.
1690   if( block()->count() > 0 )
1691     sum = block()->count();
1692   cnt = sum / FreqCountInvocations;
1693 
1694   // Pin probability to sane limits
1695   float prob;
1696   if( !taken )
1697     prob = (0+PROB_MIN) / 2;
1698   else if( !not_taken )
1699     prob = (1+PROB_MAX) / 2;
1700   else {                         // Compute probability of true path
1701     prob = (float)taken / (float)(taken + not_taken);
1702     if (prob > PROB_MAX)  prob = PROB_MAX;
1703     if (prob < PROB_MIN)   prob = PROB_MIN;
1704   }
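  // Note: (0+PROB_MIN)/2 and (1+PROB_MAX)/2 lie strictly outside
  // [PROB_MIN, PROB_MAX], so a truly zero count stays distinguishable from
  // a merely tiny one (see seems_never_taken below).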
1705 
1706   assert((cnt > 0.0f) && (prob > 0.0f),
1707          "Bad frequency assignment in if cnt=%g prob=%g taken=%d not_taken=%d", cnt, prob, taken, not_taken);
1708 
1709   if (C->log() != nullptr) {
1710     const char* prob_str = nullptr;
1711     if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
1712     if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
1713     char prob_str_buf[30];
1714     if (prob_str == nullptr) {
1715       jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob);
1716       prob_str = prob_str_buf;
1717     }
1718     C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
1719                    iter().get_dest(), taken, not_taken, cnt, prob_str);
1720   }
1721   return prob;
1722 }
1723 
1724 //-----------------------------branch_prediction-------------------------------
1725 float Parse::branch_prediction(float& cnt,
1726                                BoolTest::mask btest,
1727                                int target_bci,
1728                                Node* test) {
1729   float prob = dynamic_branch_prediction(cnt, btest, test);
1730   // If prob is unknown, switch to static prediction
1731   if (prob != PROB_UNKNOWN)  return prob;
1732 
1733   prob = PROB_FAIR;                   // Set default value
1734   if (btest == BoolTest::eq)          // Exactly equal test?
1735     prob = PROB_STATIC_INFREQUENT;    // Assume it's relatively infrequent
1736   else if (btest == BoolTest::ne)
1737     prob = PROB_STATIC_FREQUENT;      // Assume it's relatively frequent
1738 
1739   // If this is a conditional test guarding a backwards branch,
1740   // assume it's a loop-back edge.  Make it a likely taken branch.
1741   if (target_bci < bci()) {
1742     if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
1743       // Since it's an OSR, we probably have profile data, but since
1744       // dynamic_branch_prediction returned PROB_UNKNOWN, the counts are too small.
1745       // Let's make a special check here for completely zero counts.
1746       ciMethodData* methodData = method()->method_data();
1747       if (!methodData->is_empty()) {
1748         ciProfileData* data = methodData->bci_to_data(bci());
1749         // Only stop for truly zero counts, which mean an unknown part
1750         // of the OSR-ed method, and we want to deopt to gather more stats.
1751         // If you have ANY counts, then this loop is simply 'cold' relative
1752         // to the OSR loop.
1753         if (data == nullptr ||
1754             (data->as_BranchData()->taken() +  data->as_BranchData()->not_taken() == 0)) {
1755           // This is the only way to return PROB_UNKNOWN:
1756           return PROB_UNKNOWN;
1757         }
1758       }
1759     }
1760     prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
1761   }
1762 
1763   assert(prob != PROB_UNKNOWN, "must have some guess at this point");
1764   return prob;
1765 }
1766 
1767 // The magic constants are chosen so as to match the output of
1768 // branch_prediction() when the profile reports a zero taken count.
1769 // It is important to distinguish zero counts unambiguously, because
1770 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
1771 // very small but nonzero probabilities, which if confused with zero
1772 // counts would keep the program recompiling indefinitely.
1773 bool Parse::seems_never_taken(float prob) const {
1774   return prob < PROB_MIN;
1775 }
1776 
1777 //-------------------------------repush_if_args--------------------------------
1778 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
1779 inline int Parse::repush_if_args() {
1780   if (PrintOpto && WizardMode) {
1781     tty->print("defending against excessive implicit null exceptions on %s @%d in ",
1782                Bytecodes::name(iter().cur_bc()), iter().cur_bci());
1783     method()->print_name(); tty->cr();
1784   }
1785   int bc_depth = - Bytecodes::depth(iter().cur_bc());
1786   assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
1787   DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
1788   assert(argument(0) != nullptr, "must exist");
1789   assert(bc_depth == 1 || argument(1) != nullptr, "two must exist");
1790   inc_sp(bc_depth);
1791   return bc_depth;
1792 }
1793 
1794 // Used by StressUnstableIfTraps
1795 static volatile int _trap_stress_counter = 0;
1796 
1797 void Parse::increment_trap_stress_counter(Node*& counter, Node*& incr_store) {
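  // Load, increment, and store back the shared counter using raw unordered
  // memory accesses; racy updates are acceptable for stress testing.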
1798   Node* counter_addr = makecon(TypeRawPtr::make((address)&_trap_stress_counter));
1799   counter = make_load(control(), counter_addr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
1800   counter = _gvn.transform(new AddINode(counter, intcon(1)));
1801   incr_store = store_to_memory(control(), counter_addr, counter, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
1802 }
1803 
1804 //----------------------------------do_ifnull----------------------------------
1805 void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
1806   int target_bci = iter().get_dest();
1807 
1808   Node* counter = nullptr;
1809   Node* incr_store = nullptr;
1810   bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1811   if (do_stress_trap) {
1812     increment_trap_stress_counter(counter, incr_store);
1813   }
1814 
1815   Block* branch_block = successor_for_bci(target_bci);
1816   Block* next_block   = successor_for_bci(iter().next_bci());
1817 
1818   float cnt;
1819   float prob = branch_prediction(cnt, btest, target_bci, c);
1820   if (prob == PROB_UNKNOWN) {
1821     // (An earlier version of do_ifnull omitted this trap for OSR methods.)
1822     if (PrintOpto && Verbose) {
1823       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1824     }
1825     repush_if_args(); // to gather stats on loop
1826     uncommon_trap(Deoptimization::Reason_unreached,
1827                   Deoptimization::Action_reinterpret,
1828                   nullptr, "cold");
1829     if (C->eliminate_boxing()) {
1830       // Mark the successor blocks as parsed
1831       branch_block->next_path_num();
1832       next_block->next_path_num();
1833     }
1834     return;
1835   }
1836 
1837   NOT_PRODUCT(explicit_null_checks_inserted++);
1838 
1839   // Generate real control flow
1840   Node   *tst = _gvn.transform( new BoolNode( c, btest ) );
1841 
1842   // Sanity check the probability value
1843   assert(prob > 0.0f,"Bad probability in Parser");
1844   // Need xform to put node in hash table
1845   IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1846   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1847   // True branch
1848   { PreserveJVMState pjvms(this);
1849     Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
1850     set_control(iftrue);
1851 
1852     if (stopped()) {            // Path is dead?
1853       NOT_PRODUCT(explicit_null_checks_elided++);
1854       if (C->eliminate_boxing()) {
1855         // Mark the successor block as parsed
1856         branch_block->next_path_num();
1857       }
1858     } else {                    // Path is live.
1859       adjust_map_after_if(btest, c, prob, branch_block);
1860       if (!stopped()) {
1861         merge(target_bci);
1862       }
1863     }
1864   }
1865 
1866   // False branch
1867   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1868   set_control(iffalse);
1869 
1870   if (stopped()) {              // Path is dead?
1871     NOT_PRODUCT(explicit_null_checks_elided++);
1872     if (C->eliminate_boxing()) {
1873       // Mark the successor block as parsed
1874       next_block->next_path_num();
1875     }
1876   } else  {                     // Path is live.
1877     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1878   }
1879 
1880   if (do_stress_trap) {
1881     stress_trap(iff, counter, incr_store);
1882   }
1883 }
1884 
1885 //------------------------------------do_if------------------------------------
1886 void Parse::do_if(BoolTest::mask btest, Node* c, bool can_trap, bool new_path, Node** ctrl_taken) {
1887   int target_bci = iter().get_dest();
1888 
1889   Block* branch_block = successor_for_bci(target_bci);
1890   Block* next_block   = successor_for_bci(iter().next_bci());
1891 
1892   float cnt;
1893   float prob = branch_prediction(cnt, btest, target_bci, c);
1894   float untaken_prob = 1.0 - prob;
1895 
1896   if (prob == PROB_UNKNOWN) {
1897     if (PrintOpto && Verbose) {
1898       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1899     }
1900     repush_if_args(); // to gather stats on loop
1901     uncommon_trap(Deoptimization::Reason_unreached,
1902                   Deoptimization::Action_reinterpret,
1903                   nullptr, "cold");
1904     if (C->eliminate_boxing()) {
1905       // Mark the successor blocks as parsed
1906       branch_block->next_path_num();
1907       next_block->next_path_num();
1908     }
1909     return;
1910   }
1911 
1912   Node* counter = nullptr;
1913   Node* incr_store = nullptr;
1914   bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1915   if (do_stress_trap) {
1916     increment_trap_stress_counter(counter, incr_store);
1917   }
1918 
1919   // Sanity check the probability value
1920   assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1921 
1922   bool taken_if_true = true;
1923   // Convert BoolTest to canonical form:
1924   if (!BoolTest(btest).is_canonical()) {
1925     btest         = BoolTest(btest).negate();
1926     taken_if_true = false;
1927     // prob is NOT updated here; it remains the probability of the taken
1928     // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1929   }
1930   assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1931 
1932   Node* tst0 = new BoolNode(c, btest);
1933   Node* tst = _gvn.transform(tst0);
1934   BoolTest::mask taken_btest   = BoolTest::illegal;
1935   BoolTest::mask untaken_btest = BoolTest::illegal;
1936 
1937   if (tst->is_Bool()) {
1938     // Refresh c from the transformed bool node, since it may be
1939     // simpler than the original c.  Also re-canonicalize btest.
1940     // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p null)).
1941     // That can arise from statements like: if (x instanceof C) ...
1942     if (tst != tst0) {
1943       // Canonicalize one more time since transform can change it.
1944       btest = tst->as_Bool()->_test._test;
1945       if (!BoolTest(btest).is_canonical()) {
1946         // Reverse edges one more time...
1947         tst   = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1948         btest = tst->as_Bool()->_test._test;
1949         assert(BoolTest(btest).is_canonical(), "sanity");
1950         taken_if_true = !taken_if_true;
1951       }
1952       c = tst->in(1);
1953     }
1954     BoolTest::mask neg_btest = BoolTest(btest).negate();
1955     taken_btest   = taken_if_true ?     btest : neg_btest;
1956     untaken_btest = taken_if_true ? neg_btest :     btest;
1957   }
1958 
1959   // Generate real control flow
1960   float true_prob = (taken_if_true ? prob : untaken_prob);
1961   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1962   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1963   Node* taken_branch   = new IfTrueNode(iff);
1964   Node* untaken_branch = new IfFalseNode(iff);
1965   if (!taken_if_true) {  // Finish conversion to canonical form
1966     Node* tmp      = taken_branch;
1967     taken_branch   = untaken_branch;
1968     untaken_branch = tmp;
1969   }
1970 
1971   // Branch is taken:
1972   { PreserveJVMState pjvms(this);
1973     taken_branch = _gvn.transform(taken_branch);
1974     set_control(taken_branch);
1975 
1976     if (stopped()) {
1977       if (C->eliminate_boxing() && !new_path) {
1978         // Mark the successor block as parsed (if we haven't created a new path)
1979         branch_block->next_path_num();
1980       }
1981     } else {
1982       adjust_map_after_if(taken_btest, c, prob, branch_block, can_trap);
1983       if (!stopped()) {
1984         if (new_path) {
1985           // Merge by using a new path
1986           merge_new_path(target_bci);
1987         } else if (ctrl_taken != nullptr) {
1988           // Don't merge but save taken branch to be wired by caller
1989           *ctrl_taken = control();
1990         } else {
1991           merge(target_bci);
1992         }
1993       }
1994     }
1995   }
1996 
1997   untaken_branch = _gvn.transform(untaken_branch);
1998   set_control(untaken_branch);
1999 
2000   // Branch not taken.
2001   if (stopped() && ctrl_taken == nullptr) {
2002     if (C->eliminate_boxing()) {
2003       // Mark the successor block as parsed (if caller does not re-wire control flow)
2004       next_block->next_path_num();
2005     }
2006   } else {
2007     adjust_map_after_if(untaken_btest, c, untaken_prob, next_block, can_trap);
2008   }
2009 
2010   if (do_stress_trap) {
2011     stress_trap(iff, counter, incr_store);
2012   }
2013 }
2014 
2015 
2016 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
2017   if (t->speculative() == nullptr) {
2018     return ProfileUnknownNull;
2019   }
2020   if (t->speculative_always_null()) {
2021     return ProfileAlwaysNull;
2022   }
2023   if (t->speculative_maybe_null()) {
2024     return ProfileMaybeNull;
2025   }
2026   return ProfileNeverNull;
2027 }
2028 
2029 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
2030   inc_sp(2);
2031   Node* cast = null_check_common(input, T_OBJECT, true, nullptr,
2032                                  !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
2033                                  speculative_ptr_kind(tinput) == ProfileAlwaysNull);
2034   dec_sp(2);
2035   if (btest == BoolTest::ne) {
2036     {
2037       PreserveJVMState pjvms(this);
2038       replace_in_map(input, cast);
2039       int target_bci = iter().get_dest();
2040       merge(target_bci);
2041     }
2042     record_for_igvn(eq_region);
2043     set_control(_gvn.transform(eq_region));
2044   } else {
2045     replace_in_map(input, cast);
2046   }
2047 }
2048 
2049 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
2050   inc_sp(2);
2051   null_ctl = top();
2052   Node* cast = null_check_oop(input, &null_ctl,
2053                               input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
2054                               false,
2055                               speculative_ptr_kind(tinput) == ProfileNeverNull &&
2056                               !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
2057   dec_sp(2);
2058   assert(!stopped(), "null input should have been caught earlier");
2059   return cast;
2060 }
2061 
2062 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
2063   Node* ne_region = new RegionNode(1);
2064   Node* null_ctl;
2065   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2066   ne_region->add_req(null_ctl);
2067 
2068   Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
2069   {
2070     PreserveJVMState pjvms(this);
2071     inc_sp(2);
2072     set_control(slow_ctl);
2073     Deoptimization::DeoptReason reason;
2074     if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2075       reason = Deoptimization::Reason_speculate_class_check;
2076     } else {
2077       reason = Deoptimization::Reason_class_check;
2078     }
2079     uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
2080   }
2081   ne_region->add_req(control());
2082 
2083   record_for_igvn(ne_region);
2084   set_control(_gvn.transform(ne_region));
2085   if (btest == BoolTest::ne) {
2086     {
2087       PreserveJVMState pjvms(this);
2088       if (null_ctl == top()) {
2089         replace_in_map(input, cast);
2090       }
2091       int target_bci = iter().get_dest();
2092       merge(target_bci);
2093     }
2094     record_for_igvn(eq_region);
2095     set_control(_gvn.transform(eq_region));
2096   } else {
2097     if (null_ctl == top()) {
2098       replace_in_map(input, cast);
2099     }
2100     set_control(_gvn.transform(ne_region));
2101   }
2102 }
2103 
2104 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
2105   Node* ne_region = new RegionNode(1);
2106   Node* null_ctl;
2107   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2108   ne_region->add_req(null_ctl);
2109 
2110   {
2111     BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2112     inc_sp(2);
2113     uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2114   }
2115 
2116   ne_region->add_req(control());
2117 
2118   record_for_igvn(ne_region);
2119   set_control(_gvn.transform(ne_region));
2120   if (btest == BoolTest::ne) {
2121     {
2122       PreserveJVMState pjvms(this);
2123       if (null_ctl == top()) {
2124         replace_in_map(input, cast);
2125       }
2126       int target_bci = iter().get_dest();
2127       merge(target_bci);
2128     }
2129     record_for_igvn(eq_region);
2130     set_control(_gvn.transform(eq_region));
2131   } else {
2132     if (null_ctl == top()) {
2133       replace_in_map(input, cast);
2134     }
2135     set_control(_gvn.transform(ne_region));
2136   }
2137 }
2138 
2139 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2140   ciKlass* left_type = nullptr;
2141   ciKlass* right_type = nullptr;
2142   ProfilePtrKind left_ptr = ProfileUnknownNull;
2143   ProfilePtrKind right_ptr = ProfileUnknownNull;
2144   bool left_inline_type = true;
2145   bool right_inline_type = true;
2146 
2147   // Leverage profiling at acmp
2148   if (UseACmpProfile) {
2149     method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2150     if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2151       left_type = nullptr;
2152       right_type = nullptr;
2153       left_inline_type = true;
2154       right_inline_type = true;
2155     }
2156     if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2157       left_ptr = ProfileUnknownNull;
2158       right_ptr = ProfileUnknownNull;
2159     }
2160   }
2161 
2162   if (UseTypeSpeculation) {
2163     record_profile_for_speculation(left, left_type, left_ptr);
2164     record_profile_for_speculation(right, right_type, right_ptr);
2165   }
2166 
2167   if (!EnableValhalla) {
2168     Node* cmp = CmpP(left, right);
2169     cmp = optimize_cmp_with_klass(cmp);
2170     do_if(btest, cmp);
2171     return;
2172   }
2173 
2174   // Check for equality before potentially allocating
2175   if (left == right) {
2176     do_if(btest, makecon(TypeInt::CC_EQ));
2177     return;
2178   }
2179 
2180   // Allocate inline type operands and re-execute on deoptimization
2181   if (left->is_InlineType()) {
2182     if (_gvn.type(right)->is_zero_type() ||
2183         (right->is_InlineType() && _gvn.type(right->as_InlineType()->get_is_init())->is_zero_type())) {
2184       // Null checking a scalarized but nullable inline type. Check the IsInit
2185       // input instead of the oop input to avoid keeping buffer allocations alive.
2186       Node* cmp = CmpI(left->as_InlineType()->get_is_init(), intcon(0));
2187       do_if(btest, cmp);
2188       return;
2189     } else {
2190       PreserveReexecuteState preexecs(this);
2191       inc_sp(2);
2192       jvms()->set_should_reexecute(true);
2193       left = left->as_InlineType()->buffer(this)->get_oop();
2194     }
2195   }
2196   if (right->is_InlineType()) {
2197     PreserveReexecuteState preexecs(this);
2198     inc_sp(2);
2199     jvms()->set_should_reexecute(true);
2200     right = right->as_InlineType()->buffer(this)->get_oop();
2201   }
2202 
2203   // First, do a normal pointer comparison
2204   const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2205   const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2206   Node* cmp = CmpP(left, right);
2207   cmp = optimize_cmp_with_klass(cmp);
2208   if (tleft == nullptr || !tleft->can_be_inline_type() ||
2209       tright == nullptr || !tright->can_be_inline_type()) {
2210     // This is sufficient, if one of the operands can't be an inline type
2211     do_if(btest, cmp);
2212     return;
2213   }
2214 
2215   // Don't add traps to unstable if branches because additional checks are required to
2216   // decide if the operands are equal/substitutable and we therefore shouldn't prune
2217   // branches for one if based on the profiling of the acmp branches.
2218   // Also, OptimizeUnstableIf would set an incorrect re-execution state because it
2219   // assumes that there is a 1-1 mapping between the if and the acmp branches and that
2220   // hitting a trap means that we will take the corresponding acmp branch on re-execution.
2221   const bool can_trap = true;
2222 
2223   Node* eq_region = nullptr;
2224   if (btest == BoolTest::eq) {
2225     do_if(btest, cmp, !can_trap, true);
2226     if (stopped()) {
2227       // Pointers are equal, operands must be equal
2228       return;
2229     }
2230   } else {
2231     assert(btest == BoolTest::ne, "only eq or ne");
2232     Node* is_not_equal = nullptr;
2233     eq_region = new RegionNode(3);
2234     {
2235       PreserveJVMState pjvms(this);
2236       // Pointers are not equal, but more checks are needed to determine if the operands are (not) substitutable
2237       do_if(btest, cmp, !can_trap, false, &is_not_equal);
2238       if (!stopped()) {
2239         eq_region->init_req(1, control());
2240       }
2241     }
2242     if (is_not_equal == nullptr || is_not_equal->is_top()) {
2243       record_for_igvn(eq_region);
2244       set_control(_gvn.transform(eq_region));
2245       return;
2246     }
2247     set_control(is_not_equal);
2248   }
2249 
2250   // Prefer speculative types if available
2251   if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2252     if (tleft->speculative_type() != nullptr) {
2253       left_type = tleft->speculative_type();
2254     }
2255     if (tright->speculative_type() != nullptr) {
2256       right_type = tright->speculative_type();
2257     }
2258   }
2259 
2260   if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2261     ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2262     if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2263       left_ptr = speculative_left_ptr;
2264     } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2265       left_ptr = speculative_left_ptr;
2266     }
2267   }
2268   if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2269     ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2270     if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2271       right_ptr = speculative_right_ptr;
2272     } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2273       right_ptr = speculative_right_ptr;
2274     }
2275   }
2276 
2277   if (left_ptr == ProfileAlwaysNull) {
2278     // Comparison with null. Assert the input is indeed null and we're done.
2279     acmp_always_null_input(left, tleft, btest, eq_region);
2280     return;
2281   }
2282   if (right_ptr == ProfileAlwaysNull) {
2283     // Comparison with null. Assert the input is indeed null and we're done.
2284     acmp_always_null_input(right, tright, btest, eq_region);
2285     return;
2286   }
2287   if (left_type != nullptr && !left_type->is_inlinetype()) {
2288     // Comparison with an object of known type
2289     acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2290     return;
2291   }
2292   if (right_type != nullptr && !right_type->is_inlinetype()) {
2293     // Comparison with an object of known type
2294     acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2295     return;
2296   }
2297   if (!left_inline_type) {
2298     // Comparison with an object known not to be an inline type
2299     acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2300     return;
2301   }
2302   if (!right_inline_type) {
2303     // Comparison with an object known not to be an inline type
2304     acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2305     return;
2306   }
2307 
2308   // Pointers are not equal, check if the right operand is non-null
2309   Node* ne_region = new RegionNode(6);
2310   Node* null_ctl;
2311   Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2312   ne_region->init_req(1, null_ctl);
2313 
2314   // The right operand is non-null, check if it is an inline type
2315   Node* is_value = inline_type_test(not_null_right);
2316   IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2317   Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2318   ne_region->init_req(2, not_value);
2319   set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2320 
2321   // The right operand is an inline type, check if the left operand is non-null
2322   Node* not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2323   ne_region->init_req(3, null_ctl);
2324 
2325   // Check if both operands are of the same class.
2326   Node* kls_left = load_object_klass(not_null_left);
2327   Node* kls_right = load_object_klass(not_null_right);
2328   Node* kls_cmp = CmpP(kls_left, kls_right);
2329   Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2330   IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2331   Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2332   set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2333   ne_region->init_req(4, kls_ne);
2334 
2335   if (stopped()) {
2336     record_for_igvn(ne_region);
2337     set_control(_gvn.transform(ne_region));
2338     if (btest == BoolTest::ne) {
2339       {
2340         PreserveJVMState pjvms(this);
2341         int target_bci = iter().get_dest();
2342         merge(target_bci);
2343       }
2344       record_for_igvn(eq_region);
2345       set_control(_gvn.transform(eq_region));
2346     }
2347     return;
2348   }
2349 
2350   // Both operands are value objects of the same class, so we need to perform a
2351   // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
2352   Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2353   Node* mem = reset_memory();
2354   Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2355 
2356   Node* eq_io_phi = nullptr;
2357   Node* eq_mem_phi = nullptr;
2358   if (eq_region != nullptr) {
2359     eq_io_phi = PhiNode::make(eq_region, i_o());
2360     eq_mem_phi = PhiNode::make(eq_region, mem);
2361   }
2362 
2363   set_all_memory(mem);
2364 
2365   kill_dead_locals();
2366   ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(ciSymbols::isSubstitutable_name(), ciSymbols::object_object_boolean_signature());
2367   CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2368   call->set_override_symbolic_info(true);
2369   call->init_req(TypeFunc::Parms, not_null_left);
2370   call->init_req(TypeFunc::Parms+1, not_null_right);
2371   inc_sp(2);
2372   set_edges_for_java_call(call, false, false);
2373   Node* ret = set_results_for_java_call(call, false, true);
2374   dec_sp(2);
2375 
2376   // Test the return value of ValueObjectMethods::isSubstitutable()
2377   // This is the last check, do_if can emit traps now.
2378   Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2379   Node* ctl = C->top();
2380   if (btest == BoolTest::eq) {
2381     PreserveJVMState pjvms(this);
2382     do_if(btest, subst_cmp, can_trap);
2383     if (!stopped()) {
2384       ctl = control();
2385     }
2386   } else {
2387     assert(btest == BoolTest::ne, "only eq or ne");
2388     PreserveJVMState pjvms(this);
2389     do_if(btest, subst_cmp, can_trap, false, &ctl);
2390     if (!stopped()) {
2391       eq_region->init_req(2, control());
2392       eq_io_phi->init_req(2, i_o());
2393       eq_mem_phi->init_req(2, reset_memory());
2394     }
2395   }
2396   ne_region->init_req(5, ctl);
2397   ne_io_phi->init_req(5, i_o());
2398   ne_mem_phi->init_req(5, reset_memory());
2399 
2400   record_for_igvn(ne_region);
2401   set_control(_gvn.transform(ne_region));
2402   set_i_o(_gvn.transform(ne_io_phi));
2403   set_all_memory(_gvn.transform(ne_mem_phi));
2404 
2405   if (btest == BoolTest::ne) {
2406     {
2407       PreserveJVMState pjvms(this);
2408       int target_bci = iter().get_dest();
2409       merge(target_bci);
2410     }
2411 
2412     record_for_igvn(eq_region);
2413     set_control(_gvn.transform(eq_region));
2414     set_i_o(_gvn.transform(eq_io_phi));
2415     set_all_memory(_gvn.transform(eq_mem_phi));
2416   }
2417 }
2418 
2419 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
2420 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
2421 // then either takes the trap or executes the original, unstable if.
2422 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
2423   // Search for an unstable if trap
2424   CallStaticJavaNode* trap = nullptr;
2425   assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
2426   ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
2427   if (trap == nullptr || !trap->jvms()->should_reexecute()) {
2428     // No suitable trap found. Remove unused counter load and increment.
2429     C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
2430     return;
2431   }
2432 
2433   // Remove trap from optimization list since we add another path to the trap.
2434   bool success = C->remove_unstable_if_trap(trap, true);
2435   assert(success, "Trap already modified");
2436 
2437   // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
2438   int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
2439   Node* mask = intcon(right_n_bits(freq_log));
2440   counter = _gvn.transform(new AndINode(counter, mask));
2441   Node* cmp = _gvn.transform(new CmpINode(counter, intcon(0)));
2442   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::mask::eq));
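  // (With freq_log == 1 the masked counter is zero on roughly every other
  // execution; with freq_log == 31 the trap path is taken roughly once per
  // 2^31 executions.)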
2443   IfNode* iff = _gvn.transform(new IfNode(orig_iff->in(0), bol, orig_iff->_prob, orig_iff->_fcnt))->as_If();
2444   Node* if_true = _gvn.transform(new IfTrueNode(iff));
2445   Node* if_false = _gvn.transform(new IfFalseNode(iff));
2446   assert(!if_true->is_top() && !if_false->is_top(), "trap always / never taken");
2447 
2448   // Trap
2449   assert(trap_proj->outcnt() == 1, "some other nodes are dependent on the trap projection");
2450 
2451   Node* trap_region = new RegionNode(3);
2452   trap_region->set_req(1, trap_proj);
2453   trap_region->set_req(2, if_true);
2454   trap->set_req(0, _gvn.transform(trap_region));
2455 
2456   // Don't trap, execute original if
2457   orig_iff->set_req(0, if_false);
2458 }
2459 
2460 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
2461   // Randomly skip emitting an uncommon trap
2462   if (StressUnstableIfTraps && ((C->random() % 2) == 0)) {
2463     return false;
2464   }
2465   // Don't want to speculate on uncommon traps when running with -Xcomp
2466   if (!UseInterpreter) {
2467     return false;
2468   }
2469   return seems_never_taken(prob) &&
2470          !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
2471 }
2472 
2473 void Parse::maybe_add_predicate_after_if(Block* path) {
2474   if (path->is_SEL_head() && path->preds_parsed() == 0) {
2475     // Add predicates at the bci of the if dominating the loop so traps
2476     // can be recorded on the if's profile data
2477     int bc_depth = repush_if_args();
2478     add_parse_predicates();
2479     dec_sp(bc_depth);
2480     path->set_has_predicates();
2481   }
2482 }
2483 
2484 
2485 //----------------------------adjust_map_after_if------------------------------
2486 // Adjust the JVM state to reflect the result of taking this path.
2487 // Basically, it means inspecting the CmpNode controlling this
2488 // branch, seeing how it constrains a tested value, and then
2489 // deciding if it's worth our while to encode this constraint
2490 // as graph nodes in the current abstract interpretation map.
2491 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap) {
2492   if (!c->is_Cmp()) {
2493     maybe_add_predicate_after_if(path);
2494     return;
2495   }
2496 
2497   if (stopped() || btest == BoolTest::illegal) {
2498     return;                             // nothing to do
2499   }
2500 
2501   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2502 
2503   if (can_trap && path_is_suitable_for_uncommon_trap(prob)) {
2504     repush_if_args();
2505     Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2506                   Deoptimization::Action_reinterpret,
2507                   nullptr,
2508                   (is_fallthrough ? "taken always" : "taken never"));
2509 
2510     if (call != nullptr) {
2511       C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2512     }
2513     return;
2514   }
2515 
2516   Node* val = c->in(1);
2517   Node* con = c->in(2);
2518   const Type* tcon = _gvn.type(con);
2519   const Type* tval = _gvn.type(val);
2520   bool have_con = tcon->singleton();
2521   if (tval->singleton()) {
2522     if (!have_con) {
2523       // Swap, so constant is in con.
2524       con  = val;
2525       tcon = tval;
2526       val  = c->in(2);
2527       tval = _gvn.type(val);
2528       btest = BoolTest(btest).commute();
2529       have_con = true;
2530     } else {
2531       // Do we have two constants?  Then leave well enough alone.
2532       have_con = false;
2533     }
2534   }
2535   if (!have_con) {                        // remaining adjustments need a con
2536     maybe_add_predicate_after_if(path);
2537     return;
2538   }
2539 
2540   sharpen_type_after_if(btest, con, tcon, val, tval);
2541   maybe_add_predicate_after_if(path);
2542 }
2543 
2544 
2545 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
2546   Node* ldk;
2547   if (n->is_DecodeNKlass()) {
2548     if (n->in(1)->Opcode() != Op_LoadNKlass) {
2549       return nullptr;
2550     } else {
2551       ldk = n->in(1);
2552     }
2553   } else if (n->Opcode() != Op_LoadKlass) {
2554     return nullptr;
2555   } else {
2556     ldk = n;
2557   }
2558   assert(ldk != nullptr && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
2559 
2560   Node* adr = ldk->in(MemNode::Address);
2561   intptr_t off = 0;
2562   Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
2563   if (obj == nullptr || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
2564     return nullptr;
2565   const TypePtr* tp = gvn->type(obj)->is_ptr();
2566   if (tp == nullptr || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
2567     return nullptr;
2568 
2569   return obj;
2570 }
2571 
2572 void Parse::sharpen_type_after_if(BoolTest::mask btest,
2573                                   Node* con, const Type* tcon,
2574                                   Node* val, const Type* tval) {
2575   // Look for opportunities to sharpen the type of a node
2576   // whose klass is compared with a constant klass.
2577   if (btest == BoolTest::eq && tcon->isa_klassptr()) {
2578     Node* obj = extract_obj_from_klass_load(&_gvn, val);
2579     const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
2580     if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
2581        // Found:
2582        //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
2583        // or the narrowOop equivalent.
2584        const Type* obj_type = _gvn.type(obj);
2585        const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
2586        if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
2587            tboth->higher_equal(obj_type)) {
2588           // obj has to be of the exact type Foo if the CmpP succeeds.
2589           int obj_in_map = map()->find_edge(obj);
2590           JVMState* jvms = this->jvms();
2591           if (obj_in_map >= 0 &&
2592               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2593             TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2594             const Type* tcc = ccast->as_Type()->type();
2595             assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2596             // Delay transform() call to allow recovery of pre-cast value
2597             // at the control merge.
2598             _gvn.set_type_bottom(ccast);
2599             record_for_igvn(ccast);
2600             if (tboth->is_inlinetypeptr()) {
2601               ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->exact_klass(true)->as_inline_klass());
2602             }
2603             // Here's the payoff.
2604             replace_in_map(obj, ccast);
2605           }
2606        }
2607     }
2608   }
2609 
2610   int val_in_map = map()->find_edge(val);
2611   if (val_in_map < 0)  return;          // replace_in_map would be useless
2612   {
2613     JVMState* jvms = this->jvms();
2614     if (!(jvms->is_loc(val_in_map) ||
2615           jvms->is_stk(val_in_map)))
2616       return;                           // again, it would be useless
2617   }
2618 
2619   // Check for a comparison to a constant, and "know" that the compared
2620   // value is constrained on this path.
2621   assert(tcon->singleton(), "");
2622   ConstraintCastNode* ccast = nullptr;
2623   Node* cast = nullptr;
2624 
2625   switch (btest) {
2626   case BoolTest::eq:                    // Constant test?
2627     {
2628       const Type* tboth = tcon->join_speculative(tval);
2629       if (tboth == tval)  break;        // Nothing to gain.
2630       if (tcon->isa_int()) {
2631         ccast = new CastIINode(control(), val, tboth);
2632       } else if (tcon == TypePtr::NULL_PTR) {
2633         // Cast to null, but keep the pointer identity temporarily live.
2634         ccast = new CastPPNode(control(), val, tboth);
2635       } else {
2636         const TypeF* tf = tcon->isa_float_constant();
2637         const TypeD* td = tcon->isa_double_constant();
2638         // Exclude tests vs float/double 0 as these could be
2639         // either +0 or -0.  Just because you are equal to +0
2640         // doesn't mean you ARE +0!
2641         // Note, following code also replaces Long and Oop values.
2642         if ((!tf || tf->_f != 0.0) &&
2643             (!td || td->_d != 0.0))
2644           cast = con;                   // Replace non-constant val by con.
2645       }
2646     }
2647     break;
2648 
2649   case BoolTest::ne:
2650     if (tcon == TypePtr::NULL_PTR) {
2651       cast = cast_not_null(val, false);
2652     }
2653     break;
2654 
2655   default:
2656     // (At this point we could record int range types with CastII.)
2657     break;
2658   }
2659 
2660   if (ccast != nullptr) {
2661     const Type* tcc = ccast->as_Type()->type();
2662     assert(tcc != tval && tcc->higher_equal(tval), "must improve");
2663     // Delay transform() call to allow recovery of pre-cast value
2664     // at the control merge.
2665     _gvn.set_type_bottom(ccast);
2666     record_for_igvn(ccast);
2667     cast = ccast;
2668   }
2669 
2670   if (cast != nullptr) {                   // Here's the payoff.
2671     replace_in_map(val, cast);
2672   }
2673 }
2674 
2675 /**
2676  * Use the speculative type to optimize a CmpP node: if the comparison
2677  * is against the low-level klass, cast the object to its speculative
2678  * type, if any. The CmpP should then go away.
2679  *
2680  * @param c  expected CmpP node
2681  * @return   result of CmpP on the object cast to its speculative type
2682  *
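      * For example, once profiling has recorded that the object is always
      * a Foo, a shape like CmpP(LoadKlass(obj._klass), ConP(Foo.klass))
      * can have obj cast to Foo, after which the klass load and the CmpP
      * are expected to constant fold.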
2683  */
2684 Node* Parse::optimize_cmp_with_klass(Node* c) {
2685   // If this is transformed by the _gvn to a comparison with the low
2686   // level klass then we may be able to use speculation
2687   if (c->Opcode() == Op_CmpP &&
2688       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2689       c->in(2)->is_Con()) {
2690     Node* load_klass = nullptr;
2691     Node* decode = nullptr;
2692     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2693       decode = c->in(1);
2694       load_klass = c->in(1)->in(1);
2695     } else {
2696       load_klass = c->in(1);
2697     }
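         // in(2) of the klass load is its address input (MemNode::Address);
         // it should be an AddP computed from the object being compared.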
2698     if (load_klass->in(2)->is_AddP()) {
2699       Node* addp = load_klass->in(2);
2700       Node* obj = addp->in(AddPNode::Address);
2701       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2702       if (obj_type->speculative_type_not_null() != nullptr) {
2703         ciKlass* k = obj_type->speculative_type();
2704         inc_sp(2);
2705         obj = maybe_cast_profiled_obj(obj, k);
2706         dec_sp(2);
2707         if (obj->is_InlineType()) {
2708           assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2709           obj = obj->as_InlineType()->get_oop();
2710         }
2711         // Make the CmpP use the cast obj
2712         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2713         load_klass = load_klass->clone();
2714         load_klass->set_req(2, addp);
2715         load_klass = _gvn.transform(load_klass);
2716         if (decode != nullptr) {
2717           decode = decode->clone();
2718           decode->set_req(1, load_klass);
2719           load_klass = _gvn.transform(decode);
2720         }
2721         c = c->clone();
2722         c->set_req(1, load_klass);
2723         c = _gvn.transform(c);
2724       }
2725     }
2726   }
2727   return c;
2728 }
2729 
2730 //------------------------------do_one_bytecode--------------------------------
2731 // Parse this bytecode, and alter the Parser's JVM->Node mapping
2732 void Parse::do_one_bytecode() {
2733   Node *a, *b, *c, *d;          // Handy temps
2734   BoolTest::mask btest;
2735   int i;
2736 
2737   assert(!has_exceptions(), "bytecode entry state must be clear of throws");
2738 
2739   if (C->check_node_count(NodeLimitFudgeFactor * 5,
2740                           "out of nodes parsing method")) {
2741     return;
2742   }
2743 
2744 #ifdef ASSERT
2745   // for setting breakpoints
2746   if (TraceOptoParse) {
2747     tty->print(" @");
2748     dump_bci(bci());
2749     tty->print(" %s", Bytecodes::name(bc()));
2750     tty->cr();
2751   }
2752 #endif
2753 
2754   switch (bc()) {
2755   case Bytecodes::_nop:
2756     // do nothing
2757     break;
2758   case Bytecodes::_lconst_0:
2759     push_pair(longcon(0));
2760     break;
2761 
2762   case Bytecodes::_lconst_1:
2763     push_pair(longcon(1));
2764     break;
2765 
2766   case Bytecodes::_fconst_0:
2767     push(zerocon(T_FLOAT));
2768     break;
2769 
2770   case Bytecodes::_fconst_1:
2771     push(makecon(TypeF::ONE));
2772     break;
2773 
2774   case Bytecodes::_fconst_2:
2775     push(makecon(TypeF::make(2.0f)));
2776     break;
2777 
2778   case Bytecodes::_dconst_0:
2779     push_pair(zerocon(T_DOUBLE));
2780     break;
2781 
2782   case Bytecodes::_dconst_1:
2783     push_pair(makecon(TypeD::ONE));
2784     break;
2785 
2786   case Bytecodes::_iconst_m1:push(intcon(-1)); break;
2787   case Bytecodes::_iconst_0: push(intcon( 0)); break;
2788   case Bytecodes::_iconst_1: push(intcon( 1)); break;
2789   case Bytecodes::_iconst_2: push(intcon( 2)); break;
2790   case Bytecodes::_iconst_3: push(intcon( 3)); break;
2791   case Bytecodes::_iconst_4: push(intcon( 4)); break;
2792   case Bytecodes::_iconst_5: push(intcon( 5)); break;
2793   case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
2794   case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
2795   case Bytecodes::_aconst_null: push(null());  break;
2796 
2797   case Bytecodes::_ldc:
2798   case Bytecodes::_ldc_w:
2799   case Bytecodes::_ldc2_w: {
2800     // ciTypeFlow should trap if the ldc is in error state or if the constant is not loaded
2801     assert(!iter().is_in_error(), "ldc is in error state");
2802     ciConstant constant = iter().get_constant();
2803     assert(constant.is_loaded(), "constant is not loaded");
2804     const Type* con_type = Type::make_from_constant(constant);
2805     if (con_type != nullptr) {
2806       push_node(con_type->basic_type(), makecon(con_type));
2807     }
2808     break;
2809   }
2810 
2811   case Bytecodes::_aload_0:
2812     push( local(0) );
2813     break;
2814   case Bytecodes::_aload_1:
2815     push( local(1) );
2816     break;
2817   case Bytecodes::_aload_2:
2818     push( local(2) );
2819     break;
2820   case Bytecodes::_aload_3:
2821     push( local(3) );
2822     break;
2823   case Bytecodes::_aload:
2824     push( local(iter().get_index()) );
2825     break;
2826 
2827   case Bytecodes::_fload_0:
2828   case Bytecodes::_iload_0:
2829     push( local(0) );
2830     break;
2831   case Bytecodes::_fload_1:
2832   case Bytecodes::_iload_1:
2833     push( local(1) );
2834     break;
2835   case Bytecodes::_fload_2:
2836   case Bytecodes::_iload_2:
2837     push( local(2) );
2838     break;
2839   case Bytecodes::_fload_3:
2840   case Bytecodes::_iload_3:
2841     push( local(3) );
2842     break;
2843   case Bytecodes::_fload:
2844   case Bytecodes::_iload:
2845     push( local(iter().get_index()) );
2846     break;
2847   case Bytecodes::_lload_0:
2848     push_pair_local( 0 );
2849     break;
2850   case Bytecodes::_lload_1:
2851     push_pair_local( 1 );
2852     break;
2853   case Bytecodes::_lload_2:
2854     push_pair_local( 2 );
2855     break;
2856   case Bytecodes::_lload_3:
2857     push_pair_local( 3 );
2858     break;
2859   case Bytecodes::_lload:
2860     push_pair_local( iter().get_index() );
2861     break;
2862 
2863   case Bytecodes::_dload_0:
2864     push_pair_local(0);
2865     break;
2866   case Bytecodes::_dload_1:
2867     push_pair_local(1);
2868     break;
2869   case Bytecodes::_dload_2:
2870     push_pair_local(2);
2871     break;
2872   case Bytecodes::_dload_3:
2873     push_pair_local(3);
2874     break;
2875   case Bytecodes::_dload:
2876     push_pair_local(iter().get_index());
2877     break;
2878   case Bytecodes::_fstore_0:
2879   case Bytecodes::_istore_0:
2880   case Bytecodes::_astore_0:
2881     set_local( 0, pop() );
2882     break;
2883   case Bytecodes::_fstore_1:
2884   case Bytecodes::_istore_1:
2885   case Bytecodes::_astore_1:
2886     set_local( 1, pop() );
2887     break;
2888   case Bytecodes::_fstore_2:
2889   case Bytecodes::_istore_2:
2890   case Bytecodes::_astore_2:
2891     set_local( 2, pop() );
2892     break;
2893   case Bytecodes::_fstore_3:
2894   case Bytecodes::_istore_3:
2895   case Bytecodes::_astore_3:
2896     set_local( 3, pop() );
2897     break;
2898   case Bytecodes::_fstore:
2899   case Bytecodes::_istore:
2900   case Bytecodes::_astore:
2901     set_local( iter().get_index(), pop() );
2902     break;
2903   // long stores
2904   case Bytecodes::_lstore_0:
2905     set_pair_local( 0, pop_pair() );
2906     break;
2907   case Bytecodes::_lstore_1:
2908     set_pair_local( 1, pop_pair() );
2909     break;
2910   case Bytecodes::_lstore_2:
2911     set_pair_local( 2, pop_pair() );
2912     break;
2913   case Bytecodes::_lstore_3:
2914     set_pair_local( 3, pop_pair() );
2915     break;
2916   case Bytecodes::_lstore:
2917     set_pair_local( iter().get_index(), pop_pair() );
2918     break;
2919 
2920   // double stores
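       // (dprecision_rounding is a no-op on platforms whose FPU already
       // produces correctly rounded 64-bit results; it only inserts an
       // explicit rounding step where intermediates may be kept in extended
       // precision, e.g. the x87 stack on x86_32.)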
2921   case Bytecodes::_dstore_0:
2922     set_pair_local( 0, dprecision_rounding(pop_pair()) );
2923     break;
2924   case Bytecodes::_dstore_1:
2925     set_pair_local( 1, dprecision_rounding(pop_pair()) );
2926     break;
2927   case Bytecodes::_dstore_2:
2928     set_pair_local( 2, dprecision_rounding(pop_pair()) );
2929     break;
2930   case Bytecodes::_dstore_3:
2931     set_pair_local( 3, dprecision_rounding(pop_pair()) );
2932     break;
2933   case Bytecodes::_dstore:
2934     set_pair_local( iter().get_index(), dprecision_rounding(pop_pair()) );
2935     break;
2936 
2937   case Bytecodes::_pop:  dec_sp(1);   break;
2938   case Bytecodes::_pop2: dec_sp(2);   break;
2939   case Bytecodes::_swap:
2940     a = pop();
2941     b = pop();
2942     push(a);
2943     push(b);
2944     break;
2945   case Bytecodes::_dup:
2946     a = pop();
2947     push(a);
2948     push(a);
2949     break;
2950   case Bytecodes::_dup_x1:
2951     a = pop();
2952     b = pop();
2953     push( a );
2954     push( b );
2955     push( a );
2956     break;
2957   case Bytecodes::_dup_x2:
2958     a = pop();
2959     b = pop();
2960     c = pop();
2961     push( a );
2962     push( c );
2963     push( b );
2964     push( a );
2965     break;
2966   case Bytecodes::_dup2:
2967     a = pop();
2968     b = pop();
2969     push( b );
2970     push( a );
2971     push( b );
2972     push( a );
2973     break;
2974 
2975   case Bytecodes::_dup2_x1:
2976     // before: .. c, b, a
2977     // after:  .. b, a, c, b, a
2978     // not tested
2979     a = pop();
2980     b = pop();
2981     c = pop();
2982     push( b );
2983     push( a );
2984     push( c );
2985     push( b );
2986     push( a );
2987     break;
2988   case Bytecodes::_dup2_x2:
2989     // before: .. d, c, b, a
2990     // after:  .. b, a, d, c, b, a
2991     // not tested
2992     a = pop();
2993     b = pop();
2994     c = pop();
2995     d = pop();
2996     push( b );
2997     push( a );
2998     push( d );
2999     push( c );
3000     push( b );
3001     push( a );
3002     break;
3003 
3004   case Bytecodes::_arraylength: {
3005     // Must do null-check with value on expression stack
3006     Node *ary = null_check(peek(), T_ARRAY);
3007     // Null-exception detected at compile time?
3008     if (stopped())  return;
3009     a = pop();
3010     push(load_array_length(a));
3011     break;
3012   }
3013 
3014   case Bytecodes::_baload:  array_load(T_BYTE);    break;
3015   case Bytecodes::_caload:  array_load(T_CHAR);    break;
3016   case Bytecodes::_iaload:  array_load(T_INT);     break;
3017   case Bytecodes::_saload:  array_load(T_SHORT);   break;
3018   case Bytecodes::_faload:  array_load(T_FLOAT);   break;
3019   case Bytecodes::_aaload:  array_load(T_OBJECT);  break;
3020   case Bytecodes::_laload:  array_load(T_LONG);    break;
3021   case Bytecodes::_daload:  array_load(T_DOUBLE);  break;
3022   case Bytecodes::_bastore: array_store(T_BYTE);   break;
3023   case Bytecodes::_castore: array_store(T_CHAR);   break;
3024   case Bytecodes::_iastore: array_store(T_INT);    break;
3025   case Bytecodes::_sastore: array_store(T_SHORT);  break;
3026   case Bytecodes::_fastore: array_store(T_FLOAT);  break;
3027   case Bytecodes::_aastore: array_store(T_OBJECT); break;
3028   case Bytecodes::_lastore: array_store(T_LONG);   break;
3029   case Bytecodes::_dastore: array_store(T_DOUBLE); break;
3030 
3031   case Bytecodes::_getfield:
3032     do_getfield();
3033     break;
3034 
3035   case Bytecodes::_getstatic:
3036     do_getstatic();
3037     break;
3038 
3039   case Bytecodes::_putfield:
3040     do_putfield();
3041     break;
3042 
3043   case Bytecodes::_putstatic:
3044     do_putstatic();
3045     break;
3046 
3047   case Bytecodes::_irem:
3048     // Must keep both values on the expression-stack during the zero-check
3049     zero_check_int(peek());
3050     // Division by zero detected at compile time?
3051     if (stopped())  return;
3052     b = pop();
3053     a = pop();
3054     push(_gvn.transform(new ModINode(control(), a, b)));
3055     break;
3056   case Bytecodes::_idiv:
3057     // Must keep both values on the expression-stack during the zero-check
3058     zero_check_int(peek());
3059     // Division by zero detected at compile time?
3060     if (stopped())  return;
3061     b = pop();
3062     a = pop();
3063     push( _gvn.transform( new DivINode(control(),a,b) ) );
3064     break;
3065   case Bytecodes::_imul:
3066     b = pop(); a = pop();
3067     push( _gvn.transform( new MulINode(a,b) ) );
3068     break;
3069   case Bytecodes::_iadd:
3070     b = pop(); a = pop();
3071     push( _gvn.transform( new AddINode(a,b) ) );
3072     break;
3073   case Bytecodes::_ineg:
3074     a = pop();
3075     push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
3076     break;
3077   case Bytecodes::_isub:
3078     b = pop(); a = pop();
3079     push( _gvn.transform( new SubINode(a,b) ) );
3080     break;
3081   case Bytecodes::_iand:
3082     b = pop(); a = pop();
3083     push( _gvn.transform( new AndINode(a,b) ) );
3084     break;
3085   case Bytecodes::_ior:
3086     b = pop(); a = pop();
3087     push( _gvn.transform( new OrINode(a,b) ) );
3088     break;
3089   case Bytecodes::_ixor:
3090     b = pop(); a = pop();
3091     push( _gvn.transform( new XorINode(a,b) ) );
3092     break;
3093   case Bytecodes::_ishl:
3094     b = pop(); a = pop();
3095     push( _gvn.transform( new LShiftINode(a,b) ) );
3096     break;
3097   case Bytecodes::_ishr:
3098     b = pop(); a = pop();
3099     push( _gvn.transform( new RShiftINode(a,b) ) );
3100     break;
3101   case Bytecodes::_iushr:
3102     b = pop(); a = pop();
3103     push( _gvn.transform( new URShiftINode(a,b) ) );
3104     break;
3105 
3106   case Bytecodes::_fneg:
3107     a = pop();
3108     b = _gvn.transform(new NegFNode (a));
3109     push(b);
3110     break;
3111 
3112   case Bytecodes::_fsub:
3113     b = pop();
3114     a = pop();
3115     c = _gvn.transform( new SubFNode(a,b) );
3116     d = precision_rounding(c);
3117     push( d );
3118     break;
3119 
3120   case Bytecodes::_fadd:
3121     b = pop();
3122     a = pop();
3123     c = _gvn.transform( new AddFNode(a,b) );
3124     d = precision_rounding(c);
3125     push( d );
3126     break;
3127 
3128   case Bytecodes::_fmul:
3129     b = pop();
3130     a = pop();
3131     c = _gvn.transform( new MulFNode(a,b) );
3132     d = precision_rounding(c);
3133     push( d );
3134     break;
3135 
3136   case Bytecodes::_fdiv:
3137     b = pop();
3138     a = pop();
3139     c = _gvn.transform( new DivFNode(nullptr,a,b) );
3140     d = precision_rounding(c);
3141     push( d );
3142     break;
3143 
3144   case Bytecodes::_frem:
3145     if (Matcher::has_match_rule(Op_ModF)) {
3146       // Generate a ModF node.
3147       b = pop();
3148       a = pop();
3149       c = _gvn.transform( new ModFNode(nullptr,a,b) );
3150       d = precision_rounding(c);
3151       push( d );
3152     }
3153     else {
3154       // Generate a call.
3155       modf();
3156     }
3157     break;
3158 
3159   case Bytecodes::_fcmpl:
3160     b = pop();
3161     a = pop();
3162     c = _gvn.transform( new CmpF3Node( a, b));
3163     push(c);
3164     break;
3165   case Bytecodes::_fcmpg:
3166     b = pop();
3167     a = pop();
3168 
3169     // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
3170     // which negates the result sign except for unordered.  Flip the unordered
3171     // as well by using CmpF3 which implements unordered-lesser instead of
3172     // unordered-greater semantics.  Finally, negate the result bits.  The result
3173     // is the same as using a CmpF3Greater except we did it with CmpF3 alone.
3174     c = _gvn.transform( new CmpF3Node( b, a));
3175     c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3176     push(c);
3177     break;
3178 
3179   case Bytecodes::_f2i:
3180     a = pop();
3181     push(_gvn.transform(new ConvF2INode(a)));
3182     break;
3183 
3184   case Bytecodes::_d2i:
3185     a = pop_pair();
3186     b = _gvn.transform(new ConvD2INode(a));
3187     push( b );
3188     break;
3189 
3190   case Bytecodes::_f2d:
3191     a = pop();
3192     b = _gvn.transform( new ConvF2DNode(a));
3193     push_pair( b );
3194     break;
3195 
3196   case Bytecodes::_d2f:
3197     a = pop_pair();
3198     b = _gvn.transform( new ConvD2FNode(a));
3199     // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
3200     //b = _gvn.transform(new RoundFloatNode(nullptr, b) );
3201     push( b );
3202     break;
3203 
3204   case Bytecodes::_l2f:
3205     if (Matcher::convL2FSupported()) {
3206       a = pop_pair();
3207       b = _gvn.transform( new ConvL2FNode(a));
3208       // For x86_32.ad, FILD doesn't restrict precision to 24 or 53 bits.
3209       // Rather than storing the result into an FP register then pushing
3210       // out to memory to round, the machine instruction that implements
3211       // ConvL2F is responsible for rounding.
3212       // c = precision_rounding(b);
3213       push(b);
3214     } else {
3215       l2f();
3216     }
3217     break;
3218 
3219   case Bytecodes::_l2d:
3220     a = pop_pair();
3221     b = _gvn.transform( new ConvL2DNode(a));
3222     // For x86_32.ad, rounding is always necessary (see _l2f above).
3223     // c = dprecision_rounding(b);
3224     push_pair(b);
3225     break;
3226 
3227   case Bytecodes::_f2l:
3228     a = pop();
3229     b = _gvn.transform( new ConvF2LNode(a));
3230     push_pair(b);
3231     break;
3232 
3233   case Bytecodes::_d2l:
3234     a = pop_pair();
3235     b = _gvn.transform( new ConvD2LNode(a));
3236     push_pair(b);
3237     break;
3238 
3239   case Bytecodes::_dsub:
3240     b = pop_pair();
3241     a = pop_pair();
3242     c = _gvn.transform( new SubDNode(a,b) );
3243     d = dprecision_rounding(c);
3244     push_pair( d );
3245     break;
3246 
3247   case Bytecodes::_dadd:
3248     b = pop_pair();
3249     a = pop_pair();
3250     c = _gvn.transform( new AddDNode(a,b) );
3251     d = dprecision_rounding(c);
3252     push_pair( d );
3253     break;
3254 
3255   case Bytecodes::_dmul:
3256     b = pop_pair();
3257     a = pop_pair();
3258     c = _gvn.transform( new MulDNode(a,b) );
3259     d = dprecision_rounding(c);
3260     push_pair( d );
3261     break;
3262 
3263   case Bytecodes::_ddiv:
3264     b = pop_pair();
3265     a = pop_pair();
3266     c = _gvn.transform( new DivDNode(nullptr,a,b) );
3267     d = dprecision_rounding(c);
3268     push_pair( d );
3269     break;
3270 
3271   case Bytecodes::_dneg:
3272     a = pop_pair();
3273     b = _gvn.transform(new NegDNode (a));
3274     push_pair(b);
3275     break;
3276 
3277   case Bytecodes::_drem:
3278     if (Matcher::has_match_rule(Op_ModD)) {
3279       // Generate a ModD node.
3280       b = pop_pair();
3281       a = pop_pair();
3282       // a % b
3283 
3284       c = _gvn.transform( new ModDNode(nullptr,a,b) );
3285       d = dprecision_rounding(c);
3286       push_pair( d );
3287     }
3288     else {
3289       // Generate a call.
3290       modd();
3291     }
3292     break;
3293 
3294   case Bytecodes::_dcmpl:
3295     b = pop_pair();
3296     a = pop_pair();
3297     c = _gvn.transform( new CmpD3Node( a, b));
3298     push(c);
3299     break;
3300 
3301   case Bytecodes::_dcmpg:
3302     b = pop_pair();
3303     a = pop_pair();
3304     // Same as dcmpl but need to flip the unordered case.
3305     // Commute the inputs, which negates the result sign except for unordered.
3306     // Flip the unordered as well by using CmpD3 which implements
3307     // unordered-lesser instead of unordered-greater semantics.
3308     // Finally, negate the result bits.  Result is same as using a
3309     // CmpD3Greater except we did it with CmpD3 alone.
3310     c = _gvn.transform( new CmpD3Node( b, a));
3311     c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3312     push(c);
3313     break;
3314 
3315 
3316     // Note for longs: the low word is on TOS, the high word at TOS - 1
3317   case Bytecodes::_land:
3318     b = pop_pair();
3319     a = pop_pair();
3320     c = _gvn.transform( new AndLNode(a,b) );
3321     push_pair(c);
3322     break;
3323   case Bytecodes::_lor:
3324     b = pop_pair();
3325     a = pop_pair();
3326     c = _gvn.transform( new OrLNode(a,b) );
3327     push_pair(c);
3328     break;
3329   case Bytecodes::_lxor:
3330     b = pop_pair();
3331     a = pop_pair();
3332     c = _gvn.transform( new XorLNode(a,b) );
3333     push_pair(c);
3334     break;
3335 
3336   case Bytecodes::_lshl:
3337     b = pop();                  // the shift count
3338     a = pop_pair();             // value to be shifted
3339     c = _gvn.transform( new LShiftLNode(a,b) );
3340     push_pair(c);
3341     break;
3342   case Bytecodes::_lshr:
3343     b = pop();                  // the shift count
3344     a = pop_pair();             // value to be shifted
3345     c = _gvn.transform( new RShiftLNode(a,b) );
3346     push_pair(c);
3347     break;
3348   case Bytecodes::_lushr:
3349     b = pop();                  // the shift count
3350     a = pop_pair();             // value to be shifted
3351     c = _gvn.transform( new URShiftLNode(a,b) );
3352     push_pair(c);
3353     break;
3354   case Bytecodes::_lmul:
3355     b = pop_pair();
3356     a = pop_pair();
3357     c = _gvn.transform( new MulLNode(a,b) );
3358     push_pair(c);
3359     break;
3360 
3361   case Bytecodes::_lrem:
3362     // Must keep both values on the expression-stack during the zero-check
3363     assert(peek(0) == top(), "long word order");
3364     zero_check_long(peek(1));
3365     // Division by zero detected at compile time?
3366     if (stopped())  return;
3367     b = pop_pair();
3368     a = pop_pair();
3369     c = _gvn.transform( new ModLNode(control(),a,b) );
3370     push_pair(c);
3371     break;
3372 
3373   case Bytecodes::_ldiv:
3374     // Must keep both values on the expression-stack during the zero-check
3375     assert(peek(0) == top(), "long word order");
3376     zero_check_long(peek(1));
3377     // Division by zero detected at compile time?
3378     if (stopped())  return;
3379     b = pop_pair();
3380     a = pop_pair();
3381     c = _gvn.transform( new DivLNode(control(),a,b) );
3382     push_pair(c);
3383     break;
3384 
3385   case Bytecodes::_ladd:
3386     b = pop_pair();
3387     a = pop_pair();
3388     c = _gvn.transform( new AddLNode(a,b) );
3389     push_pair(c);
3390     break;
3391   case Bytecodes::_lsub:
3392     b = pop_pair();
3393     a = pop_pair();
3394     c = _gvn.transform( new SubLNode(a,b) );
3395     push_pair(c);
3396     break;
3397   case Bytecodes::_lcmp:
3398     // Safepoints are now inserted _before_ branches.  The long-compare
3399     // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
3400     // slew of control flow.  These are usually followed by a CmpI vs zero and
3401     // a branch; this pattern then optimizes to the obvious long-compare and
3402     // branch.  However, if the branch is backwards there's a Safepoint
3403     // inserted.  The inserted Safepoint captures the JVM state at the
3404     // pre-branch point, i.e. it captures the 3-way value.  Thus if a
3405     // long-compare is used to control a loop the debug info will force
3406     // computation of the 3-way value, even though the generated code uses a
3407     // long-compare and branch.  We try to rectify the situation by inserting
3408     // a SafePoint here and have it dominate and kill the safepoint added at a
3409     // following backwards branch.  At this point the JVM state merely holds 2
3410     // longs but not the 3-way value.
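         // For example, a "lcmp; ifgt" pair is expected to collapse into a
         // single long-compare-and-branch with no materialized -1/0/+1 value.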
3411     switch (iter().next_bc()) {
3412       case Bytecodes::_ifgt:
3413       case Bytecodes::_iflt:
3414       case Bytecodes::_ifge:
3415       case Bytecodes::_ifle:
3416       case Bytecodes::_ifne:
3417       case Bytecodes::_ifeq:
3418         // If this is a backwards branch in the bytecodes, add Safepoint
3419         maybe_add_safepoint(iter().next_get_dest());
3420       default:
3421         break;
3422     }
3423     b = pop_pair();
3424     a = pop_pair();
3425     c = _gvn.transform( new CmpL3Node( a, b ));
3426     push(c);
3427     break;
3428 
3429   case Bytecodes::_lneg:
3430     a = pop_pair();
3431     b = _gvn.transform( new SubLNode(longcon(0),a));
3432     push_pair(b);
3433     break;
3434   case Bytecodes::_l2i:
3435     a = pop_pair();
3436     push( _gvn.transform( new ConvL2INode(a)));
3437     break;
3438   case Bytecodes::_i2l:
3439     a = pop();
3440     b = _gvn.transform( new ConvI2LNode(a));
3441     push_pair(b);
3442     break;
3443   case Bytecodes::_i2b:
3444     // Sign extend
3445     a = pop();
3446     a = Compile::narrow_value(T_BYTE, a, nullptr, &_gvn, true);
3447     push(a);
3448     break;
3449   case Bytecodes::_i2s:
3450     a = pop();
3451     a = Compile::narrow_value(T_SHORT, a, nullptr, &_gvn, true);
3452     push(a);
3453     break;
3454   case Bytecodes::_i2c:
3455     a = pop();
3456     a = Compile::narrow_value(T_CHAR, a, nullptr, &_gvn, true);
3457     push(a);
3458     break;
3459 
3460   case Bytecodes::_i2f:
3461     a = pop();
3462     b = _gvn.transform( new ConvI2FNode(a) );
3463     c = precision_rounding(b);
3464     push(c);
3465     break;
3466 
3467   case Bytecodes::_i2d:
3468     a = pop();
3469     b = _gvn.transform( new ConvI2DNode(a));
3470     push_pair(b);
3471     break;
3472 
3473   case Bytecodes::_iinc:        // Increment local
3474     i = iter().get_index();     // Get local index
3475     set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
3476     break;
3477 
3478   // Exit points of synchronized methods must have an unlock node
3479   case Bytecodes::_return:
3480     return_current(nullptr);
3481     break;
3482 
3483   case Bytecodes::_ireturn:
3484   case Bytecodes::_areturn:
3485   case Bytecodes::_freturn:
3486     return_current(pop());
3487     break;
3488   case Bytecodes::_lreturn:
3489     return_current(pop_pair());
3490     break;
3491   case Bytecodes::_dreturn:
3492     return_current(pop_pair());
3493     break;
3494 
3495   case Bytecodes::_athrow:
3496     // null exception oop throws null pointer exception
3497     null_check(peek());
3498     if (stopped())  return;
3499     // Hook the thrown exception directly to subsequent handlers.
3500     if (BailoutToInterpreterForThrows) {
3501       // Keep method interpreted from now on.
3502       uncommon_trap(Deoptimization::Reason_unhandled,
3503                     Deoptimization::Action_make_not_compilable);
3504       return;
3505     }
3506     if (env()->jvmti_can_post_on_exceptions()) {
3507       // check if we must post exception events, take uncommon trap if so (with must_throw = false)
3508       uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
3509     }
3510     // Here if either can_post_on_exceptions or should_post_on_exceptions is false
3511     add_exception_state(make_exception_state(peek()));
3512     break;
3513 
3514   case Bytecodes::_goto:   // fall through
3515   case Bytecodes::_goto_w: {
3516     int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
3517 
3518     // If this is a backwards branch in the bytecodes, add Safepoint
3519     maybe_add_safepoint(target_bci);
3520 
3521     // Merge the current control into the target basic block
3522     merge(target_bci);
3523 
3524     // See if we can get some profile data and hand it off to the next block
3525     Block *target_block = block()->successor_for_bci(target_bci);
3526     if (target_block->pred_count() != 1)  break;
3527     ciMethodData* methodData = method()->method_data();
3528     if (!methodData->is_mature())  break;
3529     ciProfileData* data = methodData->bci_to_data(bci());
3530     assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3531     int taken = ((ciJumpData*)data)->taken();
3532     taken = method()->scale_count(taken);
3533     target_block->set_count(taken);
3534     break;
3535   }
3536 
3537   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
3538   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3539   handle_if_null:
3540     // If this is a backwards branch in the bytecodes, add Safepoint
3541     maybe_add_safepoint(iter().get_dest());
3542     a = null();
3543     b = pop();
3544     if (b->is_InlineType()) {
3545       // Null checking a scalarized but nullable inline type. Check the IsInit
3546       // input instead of the oop input to avoid keeping buffer allocations alive
3547       c = _gvn.transform(new CmpINode(b->as_InlineType()->get_is_init(), zerocon(T_INT)));
3548     } else {
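           // Speculate on profiled nullness: if the value has never been seen
           // to be null, emit a trapping null check so this branch can fold;
           // if it has always been seen null, assert nullness instead.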
3549       if (!_gvn.type(b)->speculative_maybe_null() &&
3550           !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3551         inc_sp(1);
3552         Node* null_ctl = top();
3553         b = null_check_oop(b, &null_ctl, true, true, true);
3554         assert(null_ctl->is_top(), "no null control here");
3555         dec_sp(1);
3556       } else if (_gvn.type(b)->speculative_always_null() &&
3557                  !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3558         inc_sp(1);
3559         b = null_assert(b);
3560         dec_sp(1);
3561       }
3562       c = _gvn.transform( new CmpPNode(b, a) );
3563     }
3564     do_ifnull(btest, c);
3565     break;
3566 
3567   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3568   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3569   handle_if_acmp:
3570     // If this is a backwards branch in the bytecodes, add Safepoint
3571     maybe_add_safepoint(iter().get_dest());
3572     a = pop();
3573     b = pop();
3574     do_acmp(btest, b, a);
3575     break;
3576 
3577   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3578   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3579   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3580   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3581   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3582   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3583   handle_ifxx:
3584     // If this is a backwards branch in the bytecodes, add Safepoint
3585     maybe_add_safepoint(iter().get_dest());
3586     a = _gvn.intcon(0);
3587     b = pop();
3588     c = _gvn.transform( new CmpINode(b, a) );
3589     do_if(btest, c);
3590     break;
3591 
3592   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3593   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3594   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3595   case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
3596   case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
3597   case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
3598   handle_if_icmp:
3599     // If this is a backwards branch in the bytecodes, add Safepoint
3600     maybe_add_safepoint(iter().get_dest());
3601     a = pop();
3602     b = pop();
3603     c = _gvn.transform( new CmpINode( b, a ) );
3604     do_if(btest, c);
3605     break;
3606 
3607   case Bytecodes::_tableswitch:
3608     do_tableswitch();
3609     break;
3610 
3611   case Bytecodes::_lookupswitch:
3612     do_lookupswitch();
3613     break;
3614 
3615   case Bytecodes::_invokestatic:
3616   case Bytecodes::_invokedynamic:
3617   case Bytecodes::_invokespecial:
3618   case Bytecodes::_invokevirtual:
3619   case Bytecodes::_invokeinterface:
3620     do_call();
3621     break;
3622   case Bytecodes::_checkcast:
3623     do_checkcast();
3624     break;
3625   case Bytecodes::_instanceof:
3626     do_instanceof();
3627     break;
3628   case Bytecodes::_anewarray:
3629     do_newarray();
3630     break;
3631   case Bytecodes::_newarray:
3632     do_newarray((BasicType)iter().get_index());
3633     break;
3634   case Bytecodes::_multianewarray:
3635     do_multianewarray();
3636     break;
3637   case Bytecodes::_new:
3638     do_new();
3639     break;
3640 
3641   case Bytecodes::_jsr:
3642   case Bytecodes::_jsr_w:
3643     do_jsr();
3644     break;
3645 
3646   case Bytecodes::_ret:
3647     do_ret();
3648     break;
3649 
3650 
3651   case Bytecodes::_monitorenter:
3652     do_monitor_enter();
3653     break;
3654 
3655   case Bytecodes::_monitorexit:
3656     do_monitor_exit();
3657     break;
3658 
3659   case Bytecodes::_breakpoint:
3660     // Breakpoint was set concurrently with compilation
3661     // %%% use an uncommon trap?
3662     C->record_failure("breakpoint in method");
3663     return;
3664 
3665   default:
3666 #ifndef PRODUCT
3667     map()->dump(99);
3668 #endif
3669     tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
3670     ShouldNotReachHere();
3671   }
3672 
3673 #ifndef PRODUCT
3674   if (failing()) { return; }
3675   constexpr int perBytecode = 6;
3676   if (C->should_print_igv(perBytecode)) {
3677     IdealGraphPrinter* printer = C->igv_printer();
3678     char buffer[256];
3679     jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
3680     bool old = printer->traverse_outs();
3681     printer->set_traverse_outs(true);
3682     printer->print_method(buffer, perBytecode);
3683     printer->set_traverse_outs(old);
3684   }
3685 #endif
3686 }