1 /*
   2  * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/ciMethodData.hpp"
  26 #include "ci/ciSymbols.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileLog.hpp"
  29 #include "interpreter/linkResolver.hpp"
  30 #include "jvm_io.h"
  31 #include "memory/resourceArea.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/divnode.hpp"
  38 #include "opto/idealGraphPrinter.hpp"
  39 #include "opto/idealKit.hpp"
  40 #include "opto/inlinetypenode.hpp"
  41 #include "opto/matcher.hpp"
  42 #include "opto/memnode.hpp"
  43 #include "opto/mulnode.hpp"
  44 #include "opto/opaquenode.hpp"
  45 #include "opto/parse.hpp"
  46 #include "opto/runtime.hpp"
  47 #include "runtime/deoptimization.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 
  50 #ifndef PRODUCT
  51 extern uint explicit_null_checks_inserted,
  52             explicit_null_checks_elided;
  53 #endif
  54 
  55 Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
  56   // Feed unused profile data to type speculation
  57   if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
  58     ciKlass* array_type = nullptr;
  59     ciKlass* element_type = nullptr;
  60     ProfilePtrKind element_ptr = ProfileMaybeNull;
  61     bool flat_array = true;
  62     bool null_free_array = true;
  63     method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
  64     if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
  65       ld = record_profile_for_speculation(ld, element_type, element_ptr);
  66     }
  67   }
  68   return ld;
  69 }
  70 
  71 
  72 //---------------------------------array_load----------------------------------
  73 void Parse::array_load(BasicType bt) {
  74   const Type* elemtype = Type::TOP;
  75   Node* adr = array_addressing(bt, 0, elemtype);
  76   if (stopped())  return;     // guaranteed null or range check
  77 
  78   Node* array_index = pop();
  79   Node* array = pop();
  80 
  81   // Handle inline type arrays
  82   const TypeOopPtr* element_ptr = elemtype->make_oopptr();
  83   const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
  84 
  85   if (!array_type->is_not_flat()) {
  86     // Cannot statically determine if array is a flat array, emit runtime check
  87     assert(UseArrayFlattening && is_reference_type(bt) && element_ptr->can_be_inline_type() &&
  88            (!element_ptr->is_inlinetypeptr() || element_ptr->inline_klass()->flat_in_array()), "array can't be flat");
  89     IdealKit ideal(this);
  90     IdealVariable res(ideal);
  91     ideal.declarations_done();
  92     ideal.if_then(flat_array_test(array, /* flat = */ false)); {
  93       // Non-flat array
  94       sync_kit(ideal);
  95       if (!array_type->is_flat()) {
  96         assert(array_type->is_flat() || control()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
  97         const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  98         DecoratorSet decorator_set = IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD;
  99         if (needs_range_check(array_type->size(), array_index)) {
 100           // We've emitted a RangeCheck but now insert an additional check between the range check and the actual load.
 101           // We cannot pin the load to two separate nodes. Instead, we pin it conservatively here such that it cannot
 102           // possibly float above the range check at any point.
 103           decorator_set |= C2_UNKNOWN_CONTROL_LOAD;
 104         }
 105         Node* ld = access_load_at(array, adr, adr_type, element_ptr, bt, decorator_set);
 106         if (element_ptr->is_inlinetypeptr()) {
 107           ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
 108         }
 109         ideal.set(res, ld);
 110       }
 111       ideal.sync_kit(this);
 112     } ideal.else_(); {
 113       // Flat array
 114       sync_kit(ideal);
 115       if (!array_type->is_not_flat()) {
 116         if (element_ptr->is_inlinetypeptr()) {
 117           // Element type is known, cast and load from flat array layout.
 118           ciInlineKlass* vk = element_ptr->inline_klass();
 119           bool is_null_free = array_type->is_null_free() || !vk->has_nullable_atomic_layout();
 120           bool is_not_null_free = array_type->is_not_null_free() || (!vk->has_atomic_layout() && !vk->has_non_atomic_layout());
 121           if (is_null_free) {
 122             // TODO 8350865 Impossible type
 123             is_not_null_free = false;
 124           }
 125           bool is_naturally_atomic = is_null_free && vk->nof_declared_nonstatic_fields() <= 1;
 126           bool may_need_atomicity = !is_naturally_atomic && ((!is_not_null_free && vk->has_atomic_layout()) || (!is_null_free && vk->has_nullable_atomic_layout()));
 127 
 128           adr = flat_array_element_address(array, array_index, vk, is_null_free, is_not_null_free, may_need_atomicity);
 129           int nm_offset = is_null_free ? -1 : vk->null_marker_offset_in_payload();
 130           Node* vt = InlineTypeNode::make_from_flat(this, vk, array, adr, array_index, nullptr, 0, may_need_atomicity, nm_offset);
 131           ideal.set(res, vt);
 132         } else {
 133           // Element type is unknown, and thus we cannot statically determine the exact flat array layout. Emit a
 134           // runtime call to correctly load the inline type element from the flat array.
 135           Node* inline_type = load_from_unknown_flat_array(array, array_index, element_ptr);
 136           ideal.set(res, inline_type);
 137         }
 138       }
 139       ideal.sync_kit(this);
 140     } ideal.end_if();
 141     sync_kit(ideal);
 142     Node* ld = _gvn.transform(ideal.value(res));
 143     ld = record_profile_for_speculation_at_array_load(ld);
 144     push_node(bt, ld);
 145     return;
 146   }
 147 
 148   if (elemtype == TypeInt::BOOL) {
 149     bt = T_BOOLEAN;
 150   }
 151   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 152   Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
 153                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
 154   ld = record_profile_for_speculation_at_array_load(ld);
 155   // Loading an inline type from a non-flat array
 156   if (element_ptr != nullptr && element_ptr->is_inlinetypeptr()) {
 157     assert(!array_type->is_null_free() || !element_ptr->maybe_null(), "inline type array elements should never be null");
 158     ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
 159   }
 160   push_node(bt, ld);
 161 }
 162 
 163 Node* Parse::load_from_unknown_flat_array(Node* array, Node* array_index, const TypeOopPtr* element_ptr) {
 164   // Below membars keep this access to an unknown flat array correctly
 165   // ordered with other unknown and known flat array accesses.
 166   insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 167 
 168   Node* call = nullptr;
 169   {
 170     // Re-execute flat array load if runtime call triggers deoptimization
 171     PreserveReexecuteState preexecs(this);
 172     jvms()->set_bci(_bci);
 173     jvms()->set_should_reexecute(true);
 174     inc_sp(2);
 175     kill_dead_locals();
 176     call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 177                              OptoRuntime::load_unknown_inline_Type(),
 178                              OptoRuntime::load_unknown_inline_Java(),
 179                              nullptr, TypeRawPtr::BOTTOM,
 180                              array, array_index);
 181   }
 182   make_slow_call_ex(call, env()->Throwable_klass(), false);
 183   Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
 184 
 185   insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 186 
 187   // Keep track of the information that the inline type is in flat arrays
 188   const Type* unknown_value = element_ptr->is_instptr()->cast_to_flat_in_array();
 189   return _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
 190 }
 191 
 192 //--------------------------------array_store----------------------------------
 193 void Parse::array_store(BasicType bt) {
 194   const Type* elemtype = Type::TOP;
 195   Node* adr = array_addressing(bt, type2size[bt], elemtype);
 196   if (stopped())  return;     // guaranteed null or range check
 197   Node* stored_value_casted = nullptr;
 198   if (bt == T_OBJECT) {
 199     stored_value_casted = array_store_check(adr, elemtype);
 200     if (stopped()) {
 201       return;
 202     }
 203   }
 204   Node* const stored_value = pop_node(bt); // Value to store
 205   Node* const array_index = pop();         // Index in the array
 206   Node* array = pop();                     // The array itself
 207 
 208   const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
 209   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 210 
 211   if (elemtype == TypeInt::BOOL) {
 212     bt = T_BOOLEAN;
 213   } else if (bt == T_OBJECT) {
 214     elemtype = elemtype->make_oopptr();
 215     const Type* stored_value_casted_type = _gvn.type(stored_value_casted);
 216     // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
 217     // This is only legal for non-null stores because the array_store_check always passes for null, even
 218     // if the array is null-free. Null stores are handled in GraphKit::inline_array_null_guard().
 219     bool not_inline = !stored_value_casted_type->maybe_null() && !stored_value_casted_type->is_oopptr()->can_be_inline_type();
 220     bool not_null_free = not_inline;
 221     bool not_flat = not_inline || ( stored_value_casted_type->is_inlinetypeptr() &&
 222                                    !stored_value_casted_type->inline_klass()->flat_in_array());
 223     if (!array_type->is_not_null_free() && not_null_free) {
 224       // Storing a non-inline type, mark array as not null-free.
 225       array_type = array_type->cast_to_not_null_free();
 226       Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
 227       replace_in_map(array, cast);
 228       array = cast;
 229     }
 230     if (!array_type->is_not_flat() && not_flat) {
 231       // Storing to a non-flat array, mark array as not flat.
 232       array_type = array_type->cast_to_not_flat();
 233       Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
 234       replace_in_map(array, cast);
 235       array = cast;
 236     }
 237 
 238     if (!array_type->is_flat() && array_type->is_null_free()) {
 239       // Store to non-flat null-free inline type array (elements can never be null)
 240       assert(!stored_value_casted_type->maybe_null(), "should be guaranteed by array store check");
 241       if (elemtype->is_inlinetypeptr() && elemtype->inline_klass()->is_empty()) {
 242         // Ignore empty inline stores, array is already initialized.
 243         return;
 244       }
 245     } else if (!array_type->is_not_flat()) {
 246       // Array might be a flat array, emit runtime checks (for nullptr, a simple inline_array_null_guard is sufficient).
 247       assert(UseArrayFlattening && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
 248              (!array_type->klass_is_exact() || array_type->is_flat()), "array can't be a flat array");
 249       // TODO 8350865 Depending on the available layouts, we can avoid this check in below flat/not-flat branches. Also the safe_for_replace arg is now always true.
 250       array = inline_array_null_guard(array, stored_value_casted, 3, true);
 251       IdealKit ideal(this);
 252       ideal.if_then(flat_array_test(array, /* flat = */ false)); {
 253         // Non-flat array
 254         if (!array_type->is_flat()) {
 255           sync_kit(ideal);
 256           assert(array_type->is_flat() || ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
 257           inc_sp(3);
 258           access_store_at(array, adr, adr_type, stored_value_casted, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
 259           dec_sp(3);
 260           ideal.sync_kit(this);
 261         }
 262       } ideal.else_(); {
 263         // Flat array
 264         sync_kit(ideal);
 265         if (!array_type->is_not_flat()) {
 266           // Try to determine the inline klass type of the stored value
 267           ciInlineKlass* vk = nullptr;
 268           if (stored_value_casted_type->is_inlinetypeptr()) {
 269             vk = stored_value_casted_type->inline_klass();
 270           } else if (elemtype->is_inlinetypeptr()) {
 271             vk = elemtype->inline_klass();
 272           }
 273 
 274           if (vk != nullptr) {
 275             // Element type is known, cast and store to flat array layout.
 276             bool is_null_free = array_type->is_null_free() || !vk->has_nullable_atomic_layout();
 277             bool is_not_null_free = array_type->is_not_null_free() || (!vk->has_atomic_layout() && !vk->has_non_atomic_layout());
 278             if (is_null_free) {
 279               // TODO 8350865 Impossible type
 280               is_not_null_free = false;
 281             }
 282             bool is_naturally_atomic = is_null_free && vk->nof_declared_nonstatic_fields() <= 1;
 283             bool may_need_atomicity = !is_naturally_atomic && ((!is_not_null_free && vk->has_atomic_layout()) || (!is_null_free && vk->has_nullable_atomic_layout()));
 284 
 285             // Re-execute flat array store if buffering triggers deoptimization
 286             PreserveReexecuteState preexecs(this);
 287             jvms()->set_should_reexecute(true);
 288             inc_sp(3);
 289 
 290             if (!stored_value_casted->is_InlineType()) {
 291               assert(_gvn.type(stored_value_casted) == TypePtr::NULL_PTR, "Unexpected value");
 292               stored_value_casted = InlineTypeNode::make_null(_gvn, vk);
 293             }
 294             adr = flat_array_element_address(array, array_index, vk, is_null_free, is_not_null_free, may_need_atomicity);
 295             int nm_offset = is_null_free ? -1 : vk->null_marker_offset_in_payload();
 296             stored_value_casted->as_InlineType()->store_flat(this, array, adr, array_index, nullptr, 0, may_need_atomicity, nm_offset, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 297           } else {
 298             // Element type is unknown, emit a runtime call since the flat array layout is not statically known.
 299             store_to_unknown_flat_array(array, array_index, stored_value_casted);
 300           }
 301         }
 302         ideal.sync_kit(this);
 303       }
 304       ideal.end_if();
 305       sync_kit(ideal);
 306       return;
 307     } else if (!array_type->is_not_null_free()) {
 308       // Array is not flat but may be null free
 309       assert(elemtype->is_oopptr()->can_be_inline_type(), "array can't be null-free");
 310       array = inline_array_null_guard(array, stored_value_casted, 3, true);
 311     }
 312   }
 313   inc_sp(3);
 314   access_store_at(array, adr, adr_type, stored_value, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 315   dec_sp(3);
 316 }
 317 
 318 // Emit a runtime call to store to a flat array whose element type is either unknown (i.e. we do not know the flat
 319 // array layout) or not exact (could have different flat array layouts at runtime).
 320 void Parse::store_to_unknown_flat_array(Node* array, Node* const idx, Node* non_null_stored_value) {
 321   // Below membars keep this access to an unknown flat array correctly
 322   // ordered with other unknown and known flat array accesses.
 323   insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 324 
 325   Node* call = nullptr;
 326   {
 327     // Re-execute flat array store if runtime call triggers deoptimization
 328     PreserveReexecuteState preexecs(this);
 329     jvms()->set_bci(_bci);
 330     jvms()->set_should_reexecute(true);
 331     inc_sp(3);
 332     kill_dead_locals();
 333     call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 334                       OptoRuntime::store_unknown_inline_Type(),
 335                       OptoRuntime::store_unknown_inline_Java(),
 336                       nullptr, TypeRawPtr::BOTTOM,
 337                       non_null_stored_value, array, idx);
 338   }
 339   make_slow_call_ex(call, env()->Throwable_klass(), false);
 340 
 341   insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 342 }
 343 
 344 //------------------------------array_addressing-------------------------------
 345 // Pull array and index from the stack.  Compute pointer-to-element.
 346 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
 347   Node *idx   = peek(0+vals);   // Get from stack without popping
 348   Node *ary   = peek(1+vals);   // in case of exception
 349 
 350   // Null check the array base, with correct stack contents
 351   ary = null_check(ary, T_ARRAY);
 352   // Compile-time detect of null-exception?
 353   if (stopped())  return top();
 354 
 355   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
 356   const TypeInt*    sizetype = arytype->size();
 357   elemtype = arytype->elem();
 358 
 359   if (UseUniqueSubclasses) {
 360     const Type* el = elemtype->make_ptr();
 361     if (el && el->isa_instptr()) {
 362       const TypeInstPtr* toop = el->is_instptr();
 363       if (toop->instance_klass()->unique_concrete_subklass()) {
 364         // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
 365         const Type* subklass = Type::get_const_type(toop->instance_klass());
 366         elemtype = subklass->join_speculative(el);
 367       }
 368     }
 369   }
 370 
 371   if (!arytype->is_loaded()) {
 372     // Only fails for some -Xcomp runs
 373     // The class is unloaded.  We have to run this bytecode in the interpreter.
 374     ciKlass* klass = arytype->unloaded_klass();
 375 
 376     uncommon_trap(Deoptimization::Reason_unloaded,
 377                   Deoptimization::Action_reinterpret,
 378                   klass, "!loaded array");
 379     return top();
 380   }
 381 
 382   ary = create_speculative_inline_type_array_checks(ary, arytype, elemtype);
 383 
 384   if (needs_range_check(sizetype, idx)) {
 385     create_range_check(idx, ary, sizetype);
 386   } else if (C->log() != nullptr) {
 387     C->log()->elem("observe that='!need_range_check'");
 388   }
 389 
 390   // Check for always knowing you are throwing a range-check exception
 391   if (stopped())  return top();
 392 
 393   // Make array address computation control dependent to prevent it
 394   // from floating above the range check during loop optimizations.
 395   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 396   assert(ptr != top(), "top should go hand-in-hand with stopped");
 397 
 398   return ptr;
 399 }
 400 
 401 // Check if we need a range check for an array access. This is the case if the index is either negative or if it could
 402 // be greater or equal the smallest possible array size (i.e. out-of-bounds).
 403 bool Parse::needs_range_check(const TypeInt* size_type, const Node* index) const {
 404   const TypeInt* index_type = _gvn.type(index)->is_int();
 405   return index_type->_hi >= size_type->_lo || index_type->_lo < 0;
 406 }
 407 
 408 void Parse::create_range_check(Node* idx, Node* ary, const TypeInt* sizetype) {
 409   Node* tst;
 410   if (sizetype->_hi <= 0) {
 411     // The greatest array bound is negative, so we can conclude that we're
 412     // compiling unreachable code, but the unsigned compare trick used below
 413     // only works with non-negative lengths.  Instead, hack "tst" to be zero so
 414     // the uncommon_trap path will always be taken.
 415     tst = _gvn.intcon(0);
 416   } else {
 417     // Range is constant in array-oop, so we can use the original state of mem
 418     Node* len = load_array_length(ary);
 419 
 420     // Test length vs index (standard trick using unsigned compare)
 421     Node* chk = _gvn.transform(new CmpUNode(idx, len) );
 422     BoolTest::mask btest = BoolTest::lt;
 423     tst = _gvn.transform(new BoolNode(chk, btest) );
 424   }
 425   RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 426   _gvn.set_type(rc, rc->Value(&_gvn));
 427   if (!tst->is_Con()) {
 428     record_for_igvn(rc);
 429   }
 430   set_control(_gvn.transform(new IfTrueNode(rc)));
 431   // Branch to failure if out of bounds
 432   {
 433     PreserveJVMState pjvms(this);
 434     set_control(_gvn.transform(new IfFalseNode(rc)));
 435     if (C->allow_range_check_smearing()) {
 436       // Do not use builtin_throw, since range checks are sometimes
 437       // made more stringent by an optimistic transformation.
 438       // This creates "tentative" range checks at this point,
 439       // which are not guaranteed to throw exceptions.
 440       // See IfNode::Ideal, is_range_check, adjust_check.
 441       uncommon_trap(Deoptimization::Reason_range_check,
 442                     Deoptimization::Action_make_not_entrant,
 443                     nullptr, "range_check");
 444     } else {
 445       // If we have already recompiled with the range-check-widening
 446       // heroic optimization turned off, then we must really be throwing
 447       // range check exceptions.
 448       builtin_throw(Deoptimization::Reason_range_check);
 449     }
 450   }
 451 }
 452 
 453 // For inline type arrays, we can use the profiling information for array accesses to speculate on the type, flatness,
 454 // and null-freeness. We can either prepare the speculative type for later uses or emit explicit speculative checks with
 455 // traps now. In the latter case, the speculative type guarantees can avoid additional runtime checks later (e.g.
 456 // non-null-free implies non-flat which allows us to remove flatness checks). This makes the graph simpler.
 457 Node* Parse::create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type,
 458                                                          const Type*& element_type) {
 459   if (!array_type->is_flat() && !array_type->is_not_flat()) {
 460     // For arrays that might be flat, speculate that the array has the exact type reported in the profile data such that
 461     // we can rely on a fixed memory layout (i.e. either a flat layout or not).
 462     array = cast_to_speculative_array_type(array, array_type, element_type);
 463   } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
 464     // Array is known to be either flat or not flat. If possible, update the speculative type by using the profile data
 465     // at this bci.
 466     array = cast_to_profiled_array_type(array);
 467   }
 468 
 469   // Even though the type does not tell us whether we have an inline type array or not, we can still check the profile data
 470   // whether we have a non-null-free or non-flat array. Speculating on a non-null-free array doesn't help aaload but could
 471   // be profitable for a subsequent aastore.
 472   if (!array_type->is_null_free() && !array_type->is_not_null_free()) {
 473     array = speculate_non_null_free_array(array, array_type);
 474   }
 475   if (!array_type->is_flat() && !array_type->is_not_flat()) {
 476     array = speculate_non_flat_array(array, array_type);
 477   }
 478   return array;
 479 }
 480 
 481 // Speculate that the array has the exact type reported in the profile data. We emit a trap when this turns out to be
 482 // wrong. On the fast path, we add a CheckCastPP to use the exact type.
 483 Node* Parse::cast_to_speculative_array_type(Node* const array, const TypeAryPtr*& array_type, const Type*& element_type) {
 484   Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
 485   ciKlass* speculative_array_type = array_type->speculative_type();
 486   if (too_many_traps_or_recompiles(reason) || speculative_array_type == nullptr) {
 487     // No speculative type, check profile data at this bci
 488     speculative_array_type = nullptr;
 489     reason = Deoptimization::Reason_class_check;
 490     if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
 491       ciKlass* profiled_element_type = nullptr;
 492       ProfilePtrKind element_ptr = ProfileMaybeNull;
 493       bool flat_array = true;
 494       bool null_free_array = true;
 495       method()->array_access_profiled_type(bci(), speculative_array_type, profiled_element_type, element_ptr, flat_array,
 496                                            null_free_array);
 497     }
 498   }
 499   if (speculative_array_type != nullptr) {
 500     // Speculate that this array has the exact type reported by profile data
 501     Node* casted_array = nullptr;
 502     DEBUG_ONLY(Node* old_control = control();)
 503     Node* slow_ctl = type_check_receiver(array, speculative_array_type, 1.0, &casted_array);
 504     if (stopped()) {
 505       // The check always fails and therefore profile information is incorrect. Don't use it.
 506       assert(old_control == slow_ctl, "type check should have been removed");
 507       set_control(slow_ctl);
 508     } else if (!slow_ctl->is_top()) {
 509       { PreserveJVMState pjvms(this);
 510         set_control(slow_ctl);
 511         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 512       }
 513       replace_in_map(array, casted_array);
 514       array_type = _gvn.type(casted_array)->is_aryptr();
 515       element_type = array_type->elem();
 516       return casted_array;
 517     }
 518   }
 519   return array;
 520 }
 521 
 522 // Create a CheckCastPP when the speculative type can improve the current type.
 523 Node* Parse::cast_to_profiled_array_type(Node* const array) {
 524   ciKlass* array_type = nullptr;
 525   ciKlass* element_type = nullptr;
 526   ProfilePtrKind element_ptr = ProfileMaybeNull;
 527   bool flat_array = true;
 528   bool null_free_array = true;
 529   method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 530   if (array_type != nullptr) {
 531     return record_profile_for_speculation(array, array_type, ProfileMaybeNull);
 532   }
 533   return array;
 534 }
 535 
 536 // Speculate that the array is non-null-free. We emit a trap when this turns out to be
 537 // wrong. On the fast path, we add a CheckCastPP to use the non-null-free type.
 538 Node* Parse::speculate_non_null_free_array(Node* const array, const TypeAryPtr*& array_type) {
 539   bool null_free_array = true;
 540   Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
 541   if (array_type->speculative() != nullptr &&
 542       array_type->speculative()->is_aryptr()->is_not_null_free() &&
 543       !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 544     null_free_array = false;
 545     reason = Deoptimization::Reason_speculate_class_check;
 546   } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
 547     ciKlass* profiled_array_type = nullptr;
 548     ciKlass* profiled_element_type = nullptr;
 549     ProfilePtrKind element_ptr = ProfileMaybeNull;
 550     bool flat_array = true;
 551     method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
 552                                          null_free_array);
 553     reason = Deoptimization::Reason_class_check;
 554   }
 555   if (!null_free_array) {
 556     { // Deoptimize if null-free array
 557       BuildCutout unless(this, null_free_array_test(array, /* null_free = */ false), PROB_MAX);
 558       uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 559     }
 560     assert(!stopped(), "null-free array should have been caught earlier");
 561     Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_null_free()));
 562     replace_in_map(array, casted_array);
 563     array_type = _gvn.type(casted_array)->is_aryptr();
 564     return casted_array;
 565   }
 566   return array;
 567 }
 568 
 569 // Speculate that the array is non-flat. We emit a trap when this turns out to be wrong.
 570 // On the fast path, we add a CheckCastPP to use the non-flat type.
 571 Node* Parse::speculate_non_flat_array(Node* const array, const TypeAryPtr* const array_type) {
 572   bool flat_array = true;
 573   Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
 574   if (array_type->speculative() != nullptr &&
 575       array_type->speculative()->is_aryptr()->is_not_flat() &&
 576       !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 577     flat_array = false;
 578     reason = Deoptimization::Reason_speculate_class_check;
 579   } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
 580     ciKlass* profiled_array_type = nullptr;
 581     ciKlass* profiled_element_type = nullptr;
 582     ProfilePtrKind element_ptr = ProfileMaybeNull;
 583     bool null_free_array = true;
 584     method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
 585                                          null_free_array);
 586     reason = Deoptimization::Reason_class_check;
 587   }
 588   if (!flat_array) {
 589     { // Deoptimize if flat array
 590       BuildCutout unless(this, flat_array_test(array, /* flat = */ false), PROB_MAX);
 591       uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 592     }
 593     assert(!stopped(), "flat array should have been caught earlier");
 594     Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_flat()));
 595     replace_in_map(array, casted_array);
 596     return casted_array;
 597   }
 598   return array;
 599 }
 600 
 601 // returns IfNode
 602 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 603   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 604   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 605   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 606   return iff;
 607 }
 608 
 609 
 610 // sentinel value for the target bci to mark never taken branches
 611 // (according to profiling)
 612 static const int never_reached = INT_MAX;
 613 
 614 //------------------------------helper for tableswitch-------------------------
 615 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
 616   // True branch, use existing map info
 617   { PreserveJVMState pjvms(this);
 618     Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
 619     set_control( iftrue );
 620     if (unc) {
 621       repush_if_args();
 622       uncommon_trap(Deoptimization::Reason_unstable_if,
 623                     Deoptimization::Action_reinterpret,
 624                     nullptr,
 625                     "taken always");
 626     } else {
 627       assert(dest_bci_if_true != never_reached, "inconsistent dest");
 628       merge_new_path(dest_bci_if_true);
 629     }
 630   }
 631 
 632   // False branch
 633   Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
 634   set_control( iffalse );
 635 }
 636 
 637 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
 638   // True branch, use existing map info
 639   { PreserveJVMState pjvms(this);
 640     Node *iffalse  = _gvn.transform( new IfFalseNode (iff) );
 641     set_control( iffalse );
 642     if (unc) {
 643       repush_if_args();
 644       uncommon_trap(Deoptimization::Reason_unstable_if,
 645                     Deoptimization::Action_reinterpret,
 646                     nullptr,
 647                     "taken never");
 648     } else {
 649       assert(dest_bci_if_true != never_reached, "inconsistent dest");
 650       merge_new_path(dest_bci_if_true);
 651     }
 652   }
 653 
 654   // False branch
 655   Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
 656   set_control( iftrue );
 657 }
 658 
 659 void Parse::jump_if_always_fork(int dest_bci, bool unc) {
 660   // False branch, use existing map and control()
 661   if (unc) {
 662     repush_if_args();
 663     uncommon_trap(Deoptimization::Reason_unstable_if,
 664                   Deoptimization::Action_reinterpret,
 665                   nullptr,
 666                   "taken never");
 667   } else {
 668     assert(dest_bci != never_reached, "inconsistent dest");
 669     merge_new_path(dest_bci);
 670   }
 671 }
 672 
 673 
 674 extern "C" {
 675   static int jint_cmp(const void *i, const void *j) {
 676     int a = *(jint *)i;
 677     int b = *(jint *)j;
 678     return a > b ? 1 : a < b ? -1 : 0;
 679   }
 680 }
 681 
 682 
 683 class SwitchRange : public StackObj {
 684   // a range of integers coupled with a bci destination
 685   jint _lo;                     // inclusive lower limit
 686   jint _hi;                     // inclusive upper limit
 687   int _dest;
 688   float _cnt;                   // how many times this range was hit according to profiling
 689 
 690 public:
 691   jint lo() const              { return _lo;   }
 692   jint hi() const              { return _hi;   }
 693   int  dest() const            { return _dest; }
 694   bool is_singleton() const    { return _lo == _hi; }
 695   float cnt() const            { return _cnt; }
 696 
 697   void setRange(jint lo, jint hi, int dest, float cnt) {
 698     assert(lo <= hi, "must be a non-empty range");
 699     _lo = lo, _hi = hi; _dest = dest; _cnt = cnt;
 700     assert(_cnt >= 0, "");
 701   }
 702   bool adjoinRange(jint lo, jint hi, int dest, float cnt, bool trim_ranges) {
 703     assert(lo <= hi, "must be a non-empty range");
 704     if (lo == _hi+1) {
 705       // see merge_ranges() comment below
 706       if (trim_ranges) {
 707         if (cnt == 0) {
 708           if (_cnt != 0) {
 709             return false;
 710           }
 711           if (dest != _dest) {
 712             _dest = never_reached;
 713           }
 714         } else {
 715           if (_cnt == 0) {
 716             return false;
 717           }
 718           if (dest != _dest) {
 719             return false;
 720           }
 721         }
 722       } else {
 723         if (dest != _dest) {
 724           return false;
 725         }
 726       }
 727       _hi = hi;
 728       _cnt += cnt;
 729       return true;
 730     }
 731     return false;
 732   }
 733 
 734   void set (jint value, int dest, float cnt) {
 735     setRange(value, value, dest, cnt);
 736   }
 737   bool adjoin(jint value, int dest, float cnt, bool trim_ranges) {
 738     return adjoinRange(value, value, dest, cnt, trim_ranges);
 739   }
 740   bool adjoin(SwitchRange& other) {
 741     return adjoinRange(other._lo, other._hi, other._dest, other._cnt, false);
 742   }
 743 
 744   void print() {
 745     if (is_singleton())
 746       tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
 747     else if (lo() == min_jint)
 748       tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
 749     else if (hi() == max_jint)
 750       tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
 751     else
 752       tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
 753   }
 754 };
 755 
 756 // We try to minimize the number of ranges and the size of the taken
 757 // ones using profiling data. When ranges are created,
 758 // SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
 759 // if both were never hit or both were hit to build longer unreached
 760 // ranges. Here, we now merge adjoining ranges with the same
 761 // destination and finally set destination of unreached ranges to the
 762 // special value never_reached because it can help minimize the number
 763 // of tests that are necessary.
 764 //
 765 // For instance:
 766 // [0, 1] to target1 sometimes taken
 767 // [1, 2] to target1 never taken
 768 // [2, 3] to target2 never taken
 769 // would lead to:
 770 // [0, 1] to target1 sometimes taken
 771 // [1, 3] never taken
 772 //
 773 // (first 2 ranges to target1 are not merged)
 774 static void merge_ranges(SwitchRange* ranges, int& rp) {
 775   if (rp == 0) {
 776     return;
 777   }
 778   int shift = 0;
 779   for (int j = 0; j < rp; j++) {
 780     SwitchRange& r1 = ranges[j-shift];
 781     SwitchRange& r2 = ranges[j+1];
 782     if (r1.adjoin(r2)) {
 783       shift++;
 784     } else if (shift > 0) {
 785       ranges[j+1-shift] = r2;
 786     }
 787   }
 788   rp -= shift;
 789   for (int j = 0; j <= rp; j++) {
 790     SwitchRange& r = ranges[j];
 791     if (r.cnt() == 0 && r.dest() != never_reached) {
 792       r.setRange(r.lo(), r.hi(), never_reached, r.cnt());
 793     }
 794   }
 795 }
 796 
 797 //-------------------------------do_tableswitch--------------------------------
 798 void Parse::do_tableswitch() {
 799   // Get information about tableswitch
 800   int default_dest = iter().get_dest_table(0);
 801   jint lo_index    = iter().get_int_table(1);
 802   jint hi_index    = iter().get_int_table(2);
 803   int len          = hi_index - lo_index + 1;
 804 
 805   if (len < 1) {
 806     // If this is a backward branch, add safepoint
 807     maybe_add_safepoint(default_dest);
 808     pop(); // the effect of the instruction execution on the operand stack
 809     merge(default_dest);
 810     return;
 811   }
 812 
 813   ciMethodData* methodData = method()->method_data();
 814   ciMultiBranchData* profile = nullptr;
 815   if (methodData->is_mature() && UseSwitchProfiling) {
 816     ciProfileData* data = methodData->bci_to_data(bci());
 817     if (data != nullptr && data->is_MultiBranchData()) {
 818       profile = (ciMultiBranchData*)data;
 819     }
 820   }
 821   bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
 822 
 823   // generate decision tree, using trichotomy when possible
 824   int rnum = len+2;
 825   bool makes_backward_branch = (default_dest <= bci());
 826   SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
 827   int rp = -1;
 828   if (lo_index != min_jint) {
 829     float cnt = 1.0F;
 830     if (profile != nullptr) {
 831       cnt = (float)profile->default_count() / (hi_index != max_jint ? 2.0F : 1.0F);
 832     }
 833     ranges[++rp].setRange(min_jint, lo_index-1, default_dest, cnt);
 834   }
 835   for (int j = 0; j < len; j++) {
 836     jint match_int = lo_index+j;
 837     int  dest      = iter().get_dest_table(j+3);
 838     makes_backward_branch |= (dest <= bci());
 839     float cnt = 1.0F;
 840     if (profile != nullptr) {
 841       cnt = (float)profile->count_at(j);
 842     }
 843     if (rp < 0 || !ranges[rp].adjoin(match_int, dest, cnt, trim_ranges)) {
 844       ranges[++rp].set(match_int, dest, cnt);
 845     }
 846   }
 847   jint highest = lo_index+(len-1);
 848   assert(ranges[rp].hi() == highest, "");
 849   if (highest != max_jint) {
 850     float cnt = 1.0F;
 851     if (profile != nullptr) {
 852       cnt = (float)profile->default_count() / (lo_index != min_jint ? 2.0F : 1.0F);
 853     }
 854     if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, cnt, trim_ranges)) {
 855       ranges[++rp].setRange(highest+1, max_jint, default_dest, cnt);
 856     }
 857   }
 858   assert(rp < len+2, "not too many ranges");
 859 
 860   if (trim_ranges) {
 861     merge_ranges(ranges, rp);
 862   }
 863 
 864   // Safepoint in case if backward branch observed
 865   if (makes_backward_branch) {
 866     add_safepoint();
 867   }
 868 
 869   Node* lookup = pop(); // lookup value
 870   jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
 871 }
 872 
 873 
 874 //------------------------------do_lookupswitch--------------------------------
 875 void Parse::do_lookupswitch() {
 876   // Get information about lookupswitch
 877   int default_dest = iter().get_dest_table(0);
 878   jint len          = iter().get_int_table(1);
 879 
 880   if (len < 1) {    // If this is a backward branch, add safepoint
 881     maybe_add_safepoint(default_dest);
 882     pop(); // the effect of the instruction execution on the operand stack
 883     merge(default_dest);
 884     return;
 885   }
 886 
 887   ciMethodData* methodData = method()->method_data();
 888   ciMultiBranchData* profile = nullptr;
 889   if (methodData->is_mature() && UseSwitchProfiling) {
 890     ciProfileData* data = methodData->bci_to_data(bci());
 891     if (data != nullptr && data->is_MultiBranchData()) {
 892       profile = (ciMultiBranchData*)data;
 893     }
 894   }
 895   bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
 896 
 897   // generate decision tree, using trichotomy when possible
 898   jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
 899   {
 900     for (int j = 0; j < len; j++) {
 901       table[3*j+0] = iter().get_int_table(2+2*j);
 902       table[3*j+1] = iter().get_dest_table(2+2*j+1);
 903       // Handle overflow when converting from uint to jint
 904       table[3*j+2] = (profile == nullptr) ? 1 : (jint)MIN2<uint>((uint)max_jint, profile->count_at(j));
 905     }
 906     qsort(table, len, 3*sizeof(table[0]), jint_cmp);
 907   }
 908 
 909   float default_cnt = 1.0F;
 910   if (profile != nullptr) {
 911     juint defaults = max_juint - len;
 912     default_cnt = (float)profile->default_count()/(float)defaults;
 913   }
 914 
 915   int rnum = len*2+1;
 916   bool makes_backward_branch = (default_dest <= bci());
 917   SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
 918   int rp = -1;
 919   for (int j = 0; j < len; j++) {
 920     jint match_int   = table[3*j+0];
 921     jint  dest        = table[3*j+1];
 922     jint  cnt         = table[3*j+2];
 923     jint  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
 924     makes_backward_branch |= (dest <= bci());
 925     float c = default_cnt * ((float)match_int - (float)next_lo);
 926     if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, c, trim_ranges))) {
 927       assert(default_dest != never_reached, "sentinel value for dead destinations");
 928       ranges[++rp].setRange(next_lo, match_int-1, default_dest, c);
 929     }
 930     if (rp < 0 || !ranges[rp].adjoin(match_int, dest, (float)cnt, trim_ranges)) {
 931       assert(dest != never_reached, "sentinel value for dead destinations");
 932       ranges[++rp].set(match_int, dest,  (float)cnt);
 933     }
 934   }
 935   jint highest = table[3*(len-1)];
 936   assert(ranges[rp].hi() == highest, "");
 937   if (highest != max_jint &&
 938       !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest), trim_ranges)) {
 939     ranges[++rp].setRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest));
 940   }
 941   assert(rp < rnum, "not too many ranges");
 942 
 943   if (trim_ranges) {
 944     merge_ranges(ranges, rp);
 945   }
 946 
 947   // Safepoint in case backward branch observed
 948   if (makes_backward_branch) {
 949     add_safepoint();
 950   }
 951 
 952   Node *lookup = pop(); // lookup value
 953   jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
 954 }
 955 
 956 static float if_prob(float taken_cnt, float total_cnt) {
 957   assert(taken_cnt <= total_cnt, "");
 958   if (total_cnt == 0) {
 959     return PROB_FAIR;
 960   }
 961   float p = taken_cnt / total_cnt;
 962   return clamp(p, PROB_MIN, PROB_MAX);
 963 }
 964 
 965 static float if_cnt(float cnt) {
 966   if (cnt == 0) {
 967     return COUNT_UNKNOWN;
 968   }
 969   return cnt;
 970 }
 971 
 972 static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
 973   float total_cnt = 0;
 974   for (SwitchRange* sr = lo; sr <= hi; sr++) {
 975     total_cnt += sr->cnt();
 976   }
 977   return total_cnt;
 978 }
 979 
 980 class SwitchRanges : public ResourceObj {
 981 public:
 982   SwitchRange* _lo;
 983   SwitchRange* _hi;
 984   SwitchRange* _mid;
 985   float _cost;
 986 
 987   enum {
 988     Start,
 989     LeftDone,
 990     RightDone,
 991     Done
 992   } _state;
 993 
 994   SwitchRanges(SwitchRange *lo, SwitchRange *hi)
 995     : _lo(lo), _hi(hi), _mid(nullptr),
 996       _cost(0), _state(Start) {
 997   }
 998 
 999   SwitchRanges()
1000     : _lo(nullptr), _hi(nullptr), _mid(nullptr),
1001       _cost(0), _state(Start) {}
1002 };
1003 
1004 // Estimate cost of performing a binary search on lo..hi
1005 static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
1006   GrowableArray<SwitchRanges> tree;
1007   SwitchRanges root(lo, hi);
1008   tree.push(root);
1009 
1010   float cost = 0;
1011   do {
1012     SwitchRanges& r = *tree.adr_at(tree.length()-1);
1013     if (r._hi != r._lo) {
1014       if (r._mid == nullptr) {
1015         float r_cnt = sum_of_cnts(r._lo, r._hi);
1016 
1017         if (r_cnt == 0) {
1018           tree.pop();
1019           cost = 0;
1020           continue;
1021         }
1022 
1023         SwitchRange* mid = nullptr;
1024         mid = r._lo;
1025         for (float cnt = 0; ; ) {
1026           assert(mid <= r._hi, "out of bounds");
1027           cnt += mid->cnt();
1028           if (cnt > r_cnt / 2) {
1029             break;
1030           }
1031           mid++;
1032         }
1033         assert(mid <= r._hi, "out of bounds");
1034         r._mid = mid;
1035         r._cost = r_cnt / total_cnt;
1036       }
1037       r._cost += cost;
1038       if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
1039         cost = 0;
1040         r._state = SwitchRanges::LeftDone;
1041         tree.push(SwitchRanges(r._lo, r._mid-1));
1042       } else if (r._state < SwitchRanges::RightDone) {
1043         cost = 0;
1044         r._state = SwitchRanges::RightDone;
1045         tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
1046       } else {
1047         tree.pop();
1048         cost = r._cost;
1049       }
1050     } else {
1051       tree.pop();
1052       cost = r._cost;
1053     }
1054   } while (tree.length() > 0);
1055 
1056 
1057   return cost;
1058 }
1059 
1060 // It sometimes pays off to test most common ranges before the binary search
1061 void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
1062   uint nr = hi - lo + 1;
1063   float total_cnt = sum_of_cnts(lo, hi);
1064 
1065   float min = compute_tree_cost(lo, hi, total_cnt);
1066   float extra = 1;
1067   float sub = 0;
1068 
1069   SwitchRange* array1 = lo;
1070   SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);
1071 
1072   SwitchRange* ranges = nullptr;
1073 
1074   while (nr >= 2) {
1075     assert(lo == array1 || lo == array2, "one the 2 already allocated arrays");
1076     ranges = (lo == array1) ? array2 : array1;
1077 
1078     // Find highest frequency range
1079     SwitchRange* candidate = lo;
1080     for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
1081       if (sr->cnt() > candidate->cnt()) {
1082         candidate = sr;
1083       }
1084     }
1085     SwitchRange most_freq = *candidate;
1086     if (most_freq.cnt() == 0) {
1087       break;
1088     }
1089 
1090     // Copy remaining ranges into another array
1091     int shift = 0;
1092     for (uint i = 0; i < nr; i++) {
1093       SwitchRange* sr = &lo[i];
1094       if (sr != candidate) {
1095         ranges[i-shift] = *sr;
1096       } else {
1097         shift++;
1098         if (i > 0 && i < nr-1) {
1099           SwitchRange prev = lo[i-1];
1100           prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.cnt());
1101           if (prev.adjoin(lo[i+1])) {
1102             shift++;
1103             i++;
1104           }
1105           ranges[i-shift] = prev;
1106         }
1107       }
1108     }
1109     nr -= shift;
1110 
1111     // Evaluate cost of testing the most common range and performing a
1112     // binary search on the other ranges
1113     float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
1114     if (cost >= min) {
1115       break;
1116     }
1117     // swap arrays
1118     lo = &ranges[0];
1119     hi = &ranges[nr-1];
1120 
1121     // It pays off: emit the test for the most common range
1122     assert(most_freq.cnt() > 0, "must be taken");
1123     Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
1124     Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(java_subtract(most_freq.hi(), most_freq.lo()))));
1125     Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
1126     IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
1127     jump_if_true_fork(iff, most_freq.dest(), false);
1128 
1129     sub += most_freq.cnt() / total_cnt;
1130     extra += 1 - sub;
1131     min = cost;
1132   }
1133 }
1134 
1135 //----------------------------create_jump_tables-------------------------------
1136 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
1137   // Are jumptables enabled
1138   if (!UseJumpTables)  return false;
1139 
1140   // Are jumptables supported
1141   if (!Matcher::has_match_rule(Op_Jump))  return false;
1142 
1143   bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
1144 
1145   // Decide if a guard is needed to lop off big ranges at either (or
1146   // both) end(s) of the input set. We'll call this the default target
1147   // even though we can't be sure that it is the true "default".
1148 
1149   bool needs_guard = false;
1150   int default_dest;
1151   int64_t total_outlier_size = 0;
1152   int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
1153   int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;
1154 
1155   if (lo->dest() == hi->dest()) {
1156     total_outlier_size = hi_size + lo_size;
1157     default_dest = lo->dest();
1158   } else if (lo_size > hi_size) {
1159     total_outlier_size = lo_size;
1160     default_dest = lo->dest();
1161   } else {
1162     total_outlier_size = hi_size;
1163     default_dest = hi->dest();
1164   }
1165 
1166   float total = sum_of_cnts(lo, hi);
1167   float cost = compute_tree_cost(lo, hi, total);
1168 
1169   // If a guard test will eliminate very sparse end ranges, then
1170   // it is worth the cost of an extra jump.
1171   float trimmed_cnt = 0;
1172   if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
1173     needs_guard = true;
1174     if (default_dest == lo->dest()) {
1175       trimmed_cnt += lo->cnt();
1176       lo++;
1177     }
1178     if (default_dest == hi->dest()) {
1179       trimmed_cnt += hi->cnt();
1180       hi--;
1181     }
1182   }
1183 
1184   // Find the total number of cases and ranges
1185   int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
1186   int num_range = hi - lo + 1;
1187 
1188   // Don't create table if: too large, too small, or too sparse.
1189   if (num_cases > MaxJumpTableSize)
1190     return false;
1191   if (UseSwitchProfiling) {
1192     // MinJumpTableSize is set so with a well balanced binary tree,
1193     // when the number of ranges is MinJumpTableSize, it's cheaper to
1194     // go through a JumpNode that a tree of IfNodes. Average cost of a
1195     // tree of IfNodes with MinJumpTableSize is
1196     // log2f(MinJumpTableSize) comparisons. So if the cost computed
1197     // from profile data is less than log2f(MinJumpTableSize) then
1198     // going with the binary search is cheaper.
1199     if (cost < log2f(MinJumpTableSize)) {
1200       return false;
1201     }
1202   } else {
1203     if (num_cases < MinJumpTableSize)
1204       return false;
1205   }
1206   if (num_cases > (MaxJumpTableSparseness * num_range))
1207     return false;
1208 
1209   // Normalize table lookups to zero
1210   int lowval = lo->lo();
1211   key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );
1212 
1213   // Generate a guard to protect against input keyvals that aren't
1214   // in the switch domain.
1215   if (needs_guard) {
1216     Node*   size = _gvn.intcon(num_cases);
1217     Node*   cmp = _gvn.transform(new CmpUNode(key_val, size));
1218     Node*   tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
1219     IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
1220     jump_if_true_fork(iff, default_dest, trim_ranges && trimmed_cnt == 0);
1221 
1222     total -= trimmed_cnt;
1223   }
1224 
1225   // Create an ideal node JumpTable that has projections
1226   // of all possible ranges for a switch statement
1227   // The key_val input must be converted to a pointer offset and scaled.
1228   // Compare Parse::array_addressing above.
1229 
1230   // Clean the 32-bit int into a real 64-bit offset.
1231   // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
1232   // Make I2L conversion control dependent to prevent it from
1233   // floating above the range check during loop optimizations.
1234   // Do not use a narrow int type here to prevent the data path from dying
1235   // while the control path is not removed. This can happen if the type of key_val
1236   // is later known to be out of bounds of [0, num_cases] and therefore a narrow cast
1237   // would be replaced by TOP while C2 is not able to fold the corresponding range checks.
1238   // Set _carry_dependency for the cast to avoid being removed by IGVN.
1239 #ifdef _LP64
1240   key_val = C->constrained_convI2L(&_gvn, key_val, TypeInt::INT, control(), true /* carry_dependency */);
1241 #endif
1242 
1243   // Shift the value by wordsize so we have an index into the table, rather
1244   // than a switch value
1245   Node *shiftWord = _gvn.MakeConX(wordSize);
1246   key_val = _gvn.transform( new MulXNode( key_val, shiftWord));
1247 
1248   // Create the JumpNode
1249   Arena* arena = C->comp_arena();
1250   float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
1251   int i = 0;
1252   if (total == 0) {
1253     for (SwitchRange* r = lo; r <= hi; r++) {
1254       for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1255         probs[i] = 1.0F / num_cases;
1256       }
1257     }
1258   } else {
1259     for (SwitchRange* r = lo; r <= hi; r++) {
1260       float prob = r->cnt()/total;
1261       for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1262         probs[i] = prob / (r->hi() - r->lo() + 1);
1263       }
1264     }
1265   }
1266 
1267   ciMethodData* methodData = method()->method_data();
1268   ciMultiBranchData* profile = nullptr;
1269   if (methodData->is_mature()) {
1270     ciProfileData* data = methodData->bci_to_data(bci());
1271     if (data != nullptr && data->is_MultiBranchData()) {
1272       profile = (ciMultiBranchData*)data;
1273     }
1274   }
1275 
1276   Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == nullptr ? COUNT_UNKNOWN : total));
1277 
1278   // These are the switch destinations hanging off the jumpnode
1279   i = 0;
1280   for (SwitchRange* r = lo; r <= hi; r++) {
1281     for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1282       Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
1283       {
1284         PreserveJVMState pjvms(this);
1285         set_control(input);
1286         jump_if_always_fork(r->dest(), trim_ranges && r->cnt() == 0);
1287       }
1288     }
1289   }
1290   assert(i == num_cases, "miscount of cases");
1291   stop_and_kill_map();  // no more uses for this JVMS
1292   return true;
1293 }
1294 
1295 //----------------------------jump_switch_ranges-------------------------------
1296 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
1297   Block* switch_block = block();
1298   bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
1299 
1300   if (switch_depth == 0) {
1301     // Do special processing for the top-level call.
1302     assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
1303     assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
1304 
1305     // Decrement pred-numbers for the unique set of nodes.
1306 #ifdef ASSERT
1307     if (!trim_ranges) {
1308       // Ensure that the block's successors are a (duplicate-free) set.
1309       int successors_counted = 0;  // block occurrences in [hi..lo]
1310       int unique_successors = switch_block->num_successors();
1311       for (int i = 0; i < unique_successors; i++) {
1312         Block* target = switch_block->successor_at(i);
1313 
1314         // Check that the set of successors is the same in both places.
1315         int successors_found = 0;
1316         for (SwitchRange* p = lo; p <= hi; p++) {
1317           if (p->dest() == target->start())  successors_found++;
1318         }
1319         assert(successors_found > 0, "successor must be known");
1320         successors_counted += successors_found;
1321       }
1322       assert(successors_counted == (hi-lo)+1, "no unexpected successors");
1323     }
1324 #endif
1325 
1326     // Maybe prune the inputs, based on the type of key_val.
1327     jint min_val = min_jint;
1328     jint max_val = max_jint;
1329     const TypeInt* ti = key_val->bottom_type()->isa_int();
1330     if (ti != nullptr) {
1331       min_val = ti->_lo;
1332       max_val = ti->_hi;
1333       assert(min_val <= max_val, "invalid int type");
1334     }
1335     while (lo->hi() < min_val) {
1336       lo++;
1337     }
1338     if (lo->lo() < min_val)  {
1339       lo->setRange(min_val, lo->hi(), lo->dest(), lo->cnt());
1340     }
1341     while (hi->lo() > max_val) {
1342       hi--;
1343     }
1344     if (hi->hi() > max_val) {
1345       hi->setRange(hi->lo(), max_val, hi->dest(), hi->cnt());
1346     }
1347 
1348     linear_search_switch_ranges(key_val, lo, hi);
1349   }
1350 
1351 #ifndef PRODUCT
1352   if (switch_depth == 0) {
1353     _max_switch_depth = 0;
1354     _est_switch_depth = log2i_graceful((hi - lo + 1) - 1) + 1;
1355   }
1356 #endif
1357 
1358   assert(lo <= hi, "must be a non-empty set of ranges");
1359   if (lo == hi) {
1360     jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1361   } else {
1362     assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
1363     assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
1364 
1365     if (create_jump_tables(key_val, lo, hi)) return;
1366 
1367     SwitchRange* mid = nullptr;
1368     float total_cnt = sum_of_cnts(lo, hi);
1369 
1370     int nr = hi - lo + 1;
1371     if (UseSwitchProfiling) {
1372       // Don't keep the binary search tree balanced: pick up mid point
1373       // that split frequencies in half.
1374       float cnt = 0;
1375       for (SwitchRange* sr = lo; sr <= hi; sr++) {
1376         cnt += sr->cnt();
1377         if (cnt >= total_cnt / 2) {
1378           mid = sr;
1379           break;
1380         }
1381       }
1382     } else {
1383       mid = lo + nr/2;
1384 
1385       // if there is an easy choice, pivot at a singleton:
1386       if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;
1387 
1388       assert(lo < mid && mid <= hi, "good pivot choice");
1389       assert(nr != 2 || mid == hi,   "should pick higher of 2");
1390       assert(nr != 3 || mid == hi-1, "should pick middle of 3");
1391     }
1392 
1393 
1394     Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());
1395 
1396     if (mid->is_singleton()) {
1397       IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
1398       jump_if_false_fork(iff_ne, mid->dest(), trim_ranges && mid->cnt() == 0);
1399 
1400       // Special Case:  If there are exactly three ranges, and the high
1401       // and low range each go to the same place, omit the "gt" test,
1402       // since it will not discriminate anything.
1403       bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;
1404 
1405       // if there is a higher range, test for it and process it:
1406       if (mid < hi && !eq_test_only) {
1407         // two comparisons of same values--should enable 1 test for 2 branches
1408         // Use BoolTest::lt instead of BoolTest::gt
1409         float cnt = sum_of_cnts(lo, mid-1);
1410         IfNode *iff_lt  = jump_if_fork_int(key_val, test_val, BoolTest::lt, if_prob(cnt, total_cnt), if_cnt(cnt));
1411         Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_lt) );
1412         Node   *iffalse = _gvn.transform( new IfFalseNode(iff_lt) );
1413         { PreserveJVMState pjvms(this);
1414           set_control(iffalse);
1415           jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
1416         }
1417         set_control(iftrue);
1418       }
1419 
1420     } else {
1421       // mid is a range, not a singleton, so treat mid..hi as a unit
1422       float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
1423       IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));
1424 
1425       // if there is a higher range, test for it and process it:
1426       if (mid == hi) {
1427         jump_if_true_fork(iff_ge, mid->dest(), trim_ranges && cnt == 0);
1428       } else {
1429         Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
1430         Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
1431         { PreserveJVMState pjvms(this);
1432           set_control(iftrue);
1433           jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1);
1434         }
1435         set_control(iffalse);
1436       }
1437     }
1438 
1439     // in any case, process the lower range
1440     if (mid == lo) {
1441       if (mid->is_singleton()) {
1442         jump_switch_ranges(key_val, lo+1, hi, switch_depth+1);
1443       } else {
1444         jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1445       }
1446     } else {
1447       jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
1448     }
1449   }
1450 
1451   // Decrease pred_count for each successor after all is done.
1452   if (switch_depth == 0) {
1453     int unique_successors = switch_block->num_successors();
1454     for (int i = 0; i < unique_successors; i++) {
1455       Block* target = switch_block->successor_at(i);
1456       // Throw away the pre-allocated path for each unique successor.
1457       target->next_path_num();
1458     }
1459   }
1460 
1461 #ifndef PRODUCT
1462   _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
1463   if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
1464     SwitchRange* r;
1465     int nsing = 0;
1466     for( r = lo; r <= hi; r++ ) {
1467       if( r->is_singleton() )  nsing++;
1468     }
1469     tty->print(">>> ");
1470     _method->print_short_name();
1471     tty->print_cr(" switch decision tree");
1472     tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
1473                   (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
1474     if (_max_switch_depth > _est_switch_depth) {
1475       tty->print_cr("******** BAD SWITCH DEPTH ********");
1476     }
1477     tty->print("   ");
1478     for( r = lo; r <= hi; r++ ) {
1479       r->print();
1480     }
1481     tty->cr();
1482   }
1483 #endif
1484 }
1485 
1486 Node* Parse::floating_point_mod(Node* a, Node* b, BasicType type) {
1487   assert(type == BasicType::T_FLOAT || type == BasicType::T_DOUBLE, "only float and double are floating points");
1488   CallNode* mod = type == BasicType::T_DOUBLE ? static_cast<CallNode*>(new ModDNode(C, a, b)) : new ModFNode(C, a, b);
1489 
1490   Node* prev_mem = set_predefined_input_for_runtime_call(mod);
1491   mod = _gvn.transform(mod)->as_Call();
1492   set_predefined_output_for_runtime_call(mod, prev_mem, TypeRawPtr::BOTTOM);
1493   Node* result = _gvn.transform(new ProjNode(mod, TypeFunc::Parms + 0));
1494   record_for_igvn(mod);
1495   return result;
1496 }
1497 
1498 void Parse::l2f() {
1499   Node* f2 = pop();
1500   Node* f1 = pop();
1501   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
1502                               CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
1503                               "l2f", nullptr, //no memory effects
1504                               f1, f2);
1505   Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
1506 
1507   push(res);
1508 }
1509 
1510 // Handle jsr and jsr_w bytecode
1511 void Parse::do_jsr() {
1512   assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
1513 
1514   // Store information about current state, tagged with new _jsr_bci
1515   int return_bci = iter().next_bci();
1516   int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
1517 
1518   // The way we do things now, there is only one successor block
1519   // for the jsr, because the target code is cloned by ciTypeFlow.
1520   Block* target = successor_for_bci(jsr_bci);
1521 
1522   // What got pushed?
1523   const Type* ret_addr = target->peek();
1524   assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
1525 
1526   // Effect on jsr on stack
1527   push(_gvn.makecon(ret_addr));
1528 
1529   // Flow to the jsr.
1530   merge(jsr_bci);
1531 }
1532 
1533 // Handle ret bytecode
1534 void Parse::do_ret() {
1535   // Find to whom we return.
1536   assert(block()->num_successors() == 1, "a ret can only go one place now");
1537   Block* target = block()->successor_at(0);
1538   assert(!target->is_ready(), "our arrival must be expected");
1539   int pnum = target->next_path_num();
1540   merge_common(target, pnum);
1541 }
1542 
1543 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
1544   if (btest != BoolTest::eq && btest != BoolTest::ne) {
1545     // Only ::eq and ::ne are supported for profile injection.
1546     return false;
1547   }
1548   if (test->is_Cmp() &&
1549       test->in(1)->Opcode() == Op_ProfileBoolean) {
1550     ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
1551     int false_cnt = profile->false_count();
1552     int  true_cnt = profile->true_count();
1553 
1554     // Counts matching depends on the actual test operation (::eq or ::ne).
1555     // No need to scale the counts because profile injection was designed
1556     // to feed exact counts into VM.
1557     taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
1558     not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;
1559 
1560     profile->consume();
1561     return true;
1562   }
1563   return false;
1564 }
1565 
1566 // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
1567 // We also check that individual counters are positive first, otherwise the sum can become positive.
1568 // (check for saturation, integer overflow, and immature counts)
1569 static bool counters_are_meaningful(int counter1, int counter2, int min) {
1570   // check for saturation, including "uint" values too big to fit in "int"
1571   if (counter1 < 0 || counter2 < 0) {
1572     return false;
1573   }
1574   // check for integer overflow of the sum
1575   int64_t sum = (int64_t)counter1 + (int64_t)counter2;
1576   STATIC_ASSERT(sizeof(counter1) < sizeof(sum));
1577   if (sum > INT_MAX) {
1578     return false;
1579   }
1580   // check if mature
1581   return (counter1 + counter2) >= min;
1582 }
1583 
1584 //--------------------------dynamic_branch_prediction--------------------------
1585 // Try to gather dynamic branch prediction behavior.  Return a probability
1586 // of the branch being taken and set the "cnt" field.  Returns a -1.0
1587 // if we need to use static prediction for some reason.
1588 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
1589   ResourceMark rm;
1590 
1591   cnt  = COUNT_UNKNOWN;
1592 
1593   int     taken = 0;
1594   int not_taken = 0;
1595 
1596   bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
1597 
1598   if (use_mdo) {
1599     // Use MethodData information if it is available
1600     // FIXME: free the ProfileData structure
1601     ciMethodData* methodData = method()->method_data();
1602     if (!methodData->is_mature())  return PROB_UNKNOWN;
1603     ciProfileData* data = methodData->bci_to_data(bci());
1604     if (data == nullptr) {
1605       return PROB_UNKNOWN;
1606     }
1607     if (!data->is_JumpData())  return PROB_UNKNOWN;
1608 
1609     // get taken and not taken values
1610     // NOTE: saturated UINT_MAX values become negative,
1611     // as do counts above INT_MAX.
1612     taken = data->as_JumpData()->taken();
1613     not_taken = 0;
1614     if (data->is_BranchData()) {
1615       not_taken = data->as_BranchData()->not_taken();
1616     }
1617 
1618     // scale the counts to be commensurate with invocation counts:
1619     // NOTE: overflow for positive values is clamped at INT_MAX
1620     taken = method()->scale_count(taken);
1621     not_taken = method()->scale_count(not_taken);
1622   }
1623   // At this point, saturation or overflow is indicated by INT_MAX
1624   // or a negative value.
1625 
1626   // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
1627   // We also check that individual counters are positive first, otherwise the sum can become positive.
1628   if (!counters_are_meaningful(taken, not_taken, 40)) {
1629     if (C->log() != nullptr) {
1630       C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
1631     }
1632     return PROB_UNKNOWN;
1633   }
1634 
1635   // Compute frequency that we arrive here
1636   float sum = taken + not_taken;
1637   // Adjust, if this block is a cloned private block but the
1638   // Jump counts are shared.  Taken the private counts for
1639   // just this path instead of the shared counts.
1640   if( block()->count() > 0 )
1641     sum = block()->count();
1642   cnt = sum / FreqCountInvocations;
1643 
1644   // Pin probability to sane limits
1645   float prob;
1646   if( !taken )
1647     prob = (0+PROB_MIN) / 2;
1648   else if( !not_taken )
1649     prob = (1+PROB_MAX) / 2;
1650   else {                         // Compute probability of true path
1651     prob = (float)taken / (float)(taken + not_taken);
1652     if (prob > PROB_MAX)  prob = PROB_MAX;
1653     if (prob < PROB_MIN)   prob = PROB_MIN;
1654   }
1655 
1656   assert((cnt > 0.0f) && (prob > 0.0f),
1657          "Bad frequency assignment in if cnt=%g prob=%g taken=%d not_taken=%d", cnt, prob, taken, not_taken);
1658 
1659   if (C->log() != nullptr) {
1660     const char* prob_str = nullptr;
1661     if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
1662     if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
1663     char prob_str_buf[30];
1664     if (prob_str == nullptr) {
1665       jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob);
1666       prob_str = prob_str_buf;
1667     }
1668     C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
1669                    iter().get_dest(), taken, not_taken, cnt, prob_str);
1670   }
1671   return prob;
1672 }
1673 
1674 //-----------------------------branch_prediction-------------------------------
1675 float Parse::branch_prediction(float& cnt,
1676                                BoolTest::mask btest,
1677                                int target_bci,
1678                                Node* test) {
1679   float prob = dynamic_branch_prediction(cnt, btest, test);
1680   // If prob is unknown, switch to static prediction
1681   if (prob != PROB_UNKNOWN)  return prob;
1682 
1683   prob = PROB_FAIR;                   // Set default value
1684   if (btest == BoolTest::eq)          // Exactly equal test?
1685     prob = PROB_STATIC_INFREQUENT;    // Assume its relatively infrequent
1686   else if (btest == BoolTest::ne)
1687     prob = PROB_STATIC_FREQUENT;      // Assume its relatively frequent
1688 
1689   // If this is a conditional test guarding a backwards branch,
1690   // assume its a loop-back edge.  Make it a likely taken branch.
1691   if (target_bci < bci()) {
1692     if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
1693       // Since it's an OSR, we probably have profile data, but since
1694       // branch_prediction returned PROB_UNKNOWN, the counts are too small.
1695       // Let's make a special check here for completely zero counts.
1696       ciMethodData* methodData = method()->method_data();
1697       if (!methodData->is_empty()) {
1698         ciProfileData* data = methodData->bci_to_data(bci());
1699         // Only stop for truly zero counts, which mean an unknown part
1700         // of the OSR-ed method, and we want to deopt to gather more stats.
1701         // If you have ANY counts, then this loop is simply 'cold' relative
1702         // to the OSR loop.
1703         if (data == nullptr ||
1704             (data->as_BranchData()->taken() +  data->as_BranchData()->not_taken() == 0)) {
1705           // This is the only way to return PROB_UNKNOWN:
1706           return PROB_UNKNOWN;
1707         }
1708       }
1709     }
1710     prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
1711   }
1712 
1713   assert(prob != PROB_UNKNOWN, "must have some guess at this point");
1714   return prob;
1715 }
1716 
1717 // The magic constants are chosen so as to match the output of
1718 // branch_prediction() when the profile reports a zero taken count.
1719 // It is important to distinguish zero counts unambiguously, because
1720 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
1721 // very small but nonzero probabilities, which if confused with zero
1722 // counts would keep the program recompiling indefinitely.
1723 bool Parse::seems_never_taken(float prob) const {
1724   return prob < PROB_MIN;
1725 }
1726 
1727 //-------------------------------repush_if_args--------------------------------
1728 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
1729 inline int Parse::repush_if_args() {
1730   if (PrintOpto && WizardMode) {
1731     tty->print("defending against excessive implicit null exceptions on %s @%d in ",
1732                Bytecodes::name(iter().cur_bc()), iter().cur_bci());
1733     method()->print_name(); tty->cr();
1734   }
1735   int bc_depth = - Bytecodes::depth(iter().cur_bc());
1736   assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
1737   DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
1738   assert(argument(0) != nullptr, "must exist");
1739   assert(bc_depth == 1 || argument(1) != nullptr, "two must exist");
1740   inc_sp(bc_depth);
1741   return bc_depth;
1742 }
1743 
1744 // Used by StressUnstableIfTraps
1745 static volatile int _trap_stress_counter = 0;
1746 
1747 void Parse::increment_trap_stress_counter(Node*& counter, Node*& incr_store) {
1748   Node* counter_addr = makecon(TypeRawPtr::make((address)&_trap_stress_counter));
1749   counter = make_load(control(), counter_addr, TypeInt::INT, T_INT, MemNode::unordered);
1750   counter = _gvn.transform(new AddINode(counter, intcon(1)));
1751   incr_store = store_to_memory(control(), counter_addr, counter, T_INT, MemNode::unordered);
1752 }
1753 
1754 //----------------------------------do_ifnull----------------------------------
1755 void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
1756   int target_bci = iter().get_dest();
1757 
1758   Node* counter = nullptr;
1759   Node* incr_store = nullptr;
1760   bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1761   if (do_stress_trap) {
1762     increment_trap_stress_counter(counter, incr_store);
1763   }
1764 
1765   Block* branch_block = successor_for_bci(target_bci);
1766   Block* next_block   = successor_for_bci(iter().next_bci());
1767 
1768   float cnt;
1769   float prob = branch_prediction(cnt, btest, target_bci, c);
1770   if (prob == PROB_UNKNOWN) {
1771     // (An earlier version of do_ifnull omitted this trap for OSR methods.)
1772     if (PrintOpto && Verbose) {
1773       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1774     }
1775     repush_if_args(); // to gather stats on loop
1776     uncommon_trap(Deoptimization::Reason_unreached,
1777                   Deoptimization::Action_reinterpret,
1778                   nullptr, "cold");
1779     if (C->eliminate_boxing()) {
1780       // Mark the successor blocks as parsed
1781       branch_block->next_path_num();
1782       next_block->next_path_num();
1783     }
1784     return;
1785   }
1786 
1787   NOT_PRODUCT(explicit_null_checks_inserted++);
1788 
1789   // Generate real control flow
1790   Node   *tst = _gvn.transform( new BoolNode( c, btest ) );
1791 
1792   // Sanity check the probability value
1793   assert(prob > 0.0f,"Bad probability in Parser");
1794  // Need xform to put node in hash table
1795   IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1796   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1797   // True branch
1798   { PreserveJVMState pjvms(this);
1799     Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
1800     set_control(iftrue);
1801 
1802     if (stopped()) {            // Path is dead?
1803       NOT_PRODUCT(explicit_null_checks_elided++);
1804       if (C->eliminate_boxing()) {
1805         // Mark the successor block as parsed
1806         branch_block->next_path_num();
1807       }
1808     } else {                    // Path is live.
1809       adjust_map_after_if(btest, c, prob, branch_block);
1810       if (!stopped()) {
1811         merge(target_bci);
1812       }
1813     }
1814   }
1815 
1816   // False branch
1817   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1818   set_control(iffalse);
1819 
1820   if (stopped()) {              // Path is dead?
1821     NOT_PRODUCT(explicit_null_checks_elided++);
1822     if (C->eliminate_boxing()) {
1823       // Mark the successor block as parsed
1824       next_block->next_path_num();
1825     }
1826   } else  {                     // Path is live.
1827     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1828   }
1829 
1830   if (do_stress_trap) {
1831     stress_trap(iff, counter, incr_store);
1832   }
1833 }
1834 
1835 //------------------------------------do_if------------------------------------
1836 void Parse::do_if(BoolTest::mask btest, Node* c, bool can_trap, bool new_path, Node** ctrl_taken) {
1837   int target_bci = iter().get_dest();
1838 
1839   Block* branch_block = successor_for_bci(target_bci);
1840   Block* next_block   = successor_for_bci(iter().next_bci());
1841 
1842   float cnt;
1843   float prob = branch_prediction(cnt, btest, target_bci, c);
1844   float untaken_prob = 1.0 - prob;
1845 
1846   if (prob == PROB_UNKNOWN) {
1847     if (PrintOpto && Verbose) {
1848       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1849     }
1850     repush_if_args(); // to gather stats on loop
1851     uncommon_trap(Deoptimization::Reason_unreached,
1852                   Deoptimization::Action_reinterpret,
1853                   nullptr, "cold");
1854     if (C->eliminate_boxing()) {
1855       // Mark the successor blocks as parsed
1856       branch_block->next_path_num();
1857       next_block->next_path_num();
1858     }
1859     return;
1860   }
1861 
1862   Node* counter = nullptr;
1863   Node* incr_store = nullptr;
1864   bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1865   if (do_stress_trap) {
1866     increment_trap_stress_counter(counter, incr_store);
1867   }
1868 
1869   // Sanity check the probability value
1870   assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1871 
1872   bool taken_if_true = true;
1873   // Convert BoolTest to canonical form:
1874   if (!BoolTest(btest).is_canonical()) {
1875     btest         = BoolTest(btest).negate();
1876     taken_if_true = false;
1877     // prob is NOT updated here; it remains the probability of the taken
1878     // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1879   }
1880   assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1881 
1882   Node* tst0 = new BoolNode(c, btest);
1883   Node* tst = _gvn.transform(tst0);
1884   BoolTest::mask taken_btest   = BoolTest::illegal;
1885   BoolTest::mask untaken_btest = BoolTest::illegal;
1886 
1887   if (tst->is_Bool()) {
1888     // Refresh c from the transformed bool node, since it may be
1889     // simpler than the original c.  Also re-canonicalize btest.
1890     // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p null)).
1891     // That can arise from statements like: if (x instanceof C) ...
1892     if (tst != tst0) {
1893       // Canonicalize one more time since transform can change it.
1894       btest = tst->as_Bool()->_test._test;
1895       if (!BoolTest(btest).is_canonical()) {
1896         // Reverse edges one more time...
1897         tst   = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1898         btest = tst->as_Bool()->_test._test;
1899         assert(BoolTest(btest).is_canonical(), "sanity");
1900         taken_if_true = !taken_if_true;
1901       }
1902       c = tst->in(1);
1903     }
1904     BoolTest::mask neg_btest = BoolTest(btest).negate();
1905     taken_btest   = taken_if_true ?     btest : neg_btest;
1906     untaken_btest = taken_if_true ? neg_btest :     btest;
1907   }
1908 
1909   // Generate real control flow
1910   float true_prob = (taken_if_true ? prob : untaken_prob);
1911   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1912   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1913   Node* taken_branch   = new IfTrueNode(iff);
1914   Node* untaken_branch = new IfFalseNode(iff);
1915   if (!taken_if_true) {  // Finish conversion to canonical form
1916     Node* tmp      = taken_branch;
1917     taken_branch   = untaken_branch;
1918     untaken_branch = tmp;
1919   }
1920 
1921   // Branch is taken:
1922   { PreserveJVMState pjvms(this);
1923     taken_branch = _gvn.transform(taken_branch);
1924     set_control(taken_branch);
1925 
1926     if (stopped()) {
1927       if (C->eliminate_boxing() && !new_path) {
1928         // Mark the successor block as parsed (if we haven't created a new path)
1929         branch_block->next_path_num();
1930       }
1931     } else {
1932       adjust_map_after_if(taken_btest, c, prob, branch_block, can_trap);
1933       if (!stopped()) {
1934         if (new_path) {
1935           // Merge by using a new path
1936           merge_new_path(target_bci);
1937         } else if (ctrl_taken != nullptr) {
1938           // Don't merge but save taken branch to be wired by caller
1939           *ctrl_taken = control();
1940         } else {
1941           merge(target_bci);
1942         }
1943       }
1944     }
1945   }
1946 
1947   untaken_branch = _gvn.transform(untaken_branch);
1948   set_control(untaken_branch);
1949 
1950   // Branch not taken.
1951   if (stopped() && ctrl_taken == nullptr) {
1952     if (C->eliminate_boxing()) {
1953       // Mark the successor block as parsed (if caller does not re-wire control flow)
1954       next_block->next_path_num();
1955     }
1956   } else {
1957     adjust_map_after_if(untaken_btest, c, untaken_prob, next_block, can_trap);
1958   }
1959 
1960   if (do_stress_trap) {
1961     stress_trap(iff, counter, incr_store);
1962   }
1963 }
1964 
1965 
1966 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1967   if (t->speculative() == nullptr) {
1968     return ProfileUnknownNull;
1969   }
1970   if (t->speculative_always_null()) {
1971     return ProfileAlwaysNull;
1972   }
1973   if (t->speculative_maybe_null()) {
1974     return ProfileMaybeNull;
1975   }
1976   return ProfileNeverNull;
1977 }
1978 
1979 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1980   inc_sp(2);
1981   Node* cast = null_check_common(input, T_OBJECT, true, nullptr,
1982                                  !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1983                                  speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1984   dec_sp(2);
1985   if (btest == BoolTest::ne) {
1986     {
1987       PreserveJVMState pjvms(this);
1988       replace_in_map(input, cast);
1989       int target_bci = iter().get_dest();
1990       merge(target_bci);
1991     }
1992     record_for_igvn(eq_region);
1993     set_control(_gvn.transform(eq_region));
1994   } else {
1995     replace_in_map(input, cast);
1996   }
1997 }
1998 
1999 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
2000   inc_sp(2);
2001   null_ctl = top();
2002   Node* cast = null_check_oop(input, &null_ctl,
2003                               input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
2004                               false,
2005                               speculative_ptr_kind(tinput) == ProfileNeverNull &&
2006                               !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
2007   dec_sp(2);
2008   assert(!stopped(), "null input should have been caught earlier");
2009   return cast;
2010 }
2011 
2012 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
2013   Node* ne_region = new RegionNode(1);
2014   Node* null_ctl;
2015   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2016   ne_region->add_req(null_ctl);
2017 
2018   Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
2019   {
2020     PreserveJVMState pjvms(this);
2021     inc_sp(2);
2022     set_control(slow_ctl);
2023     Deoptimization::DeoptReason reason;
2024     if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2025       reason = Deoptimization::Reason_speculate_class_check;
2026     } else {
2027       reason = Deoptimization::Reason_class_check;
2028     }
2029     uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
2030   }
2031   ne_region->add_req(control());
2032 
2033   record_for_igvn(ne_region);
2034   set_control(_gvn.transform(ne_region));
2035   if (btest == BoolTest::ne) {
2036     {
2037       PreserveJVMState pjvms(this);
2038       if (null_ctl == top()) {
2039         replace_in_map(input, cast);
2040       }
2041       int target_bci = iter().get_dest();
2042       merge(target_bci);
2043     }
2044     record_for_igvn(eq_region);
2045     set_control(_gvn.transform(eq_region));
2046   } else {
2047     if (null_ctl == top()) {
2048       replace_in_map(input, cast);
2049     }
2050     set_control(_gvn.transform(ne_region));
2051   }
2052 }
2053 
2054 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
2055   Node* ne_region = new RegionNode(1);
2056   Node* null_ctl;
2057   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2058   ne_region->add_req(null_ctl);
2059 
2060   {
2061     BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2062     inc_sp(2);
2063     uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2064   }
2065 
2066   ne_region->add_req(control());
2067 
2068   record_for_igvn(ne_region);
2069   set_control(_gvn.transform(ne_region));
2070   if (btest == BoolTest::ne) {
2071     {
2072       PreserveJVMState pjvms(this);
2073       if (null_ctl == top()) {
2074         replace_in_map(input, cast);
2075       }
2076       int target_bci = iter().get_dest();
2077       merge(target_bci);
2078     }
2079     record_for_igvn(eq_region);
2080     set_control(_gvn.transform(eq_region));
2081   } else {
2082     if (null_ctl == top()) {
2083       replace_in_map(input, cast);
2084     }
2085     set_control(_gvn.transform(ne_region));
2086   }
2087 }
2088 
2089 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2090   ciKlass* left_type = nullptr;
2091   ciKlass* right_type = nullptr;
2092   ProfilePtrKind left_ptr = ProfileUnknownNull;
2093   ProfilePtrKind right_ptr = ProfileUnknownNull;
2094   bool left_inline_type = true;
2095   bool right_inline_type = true;
2096 
2097   // Leverage profiling at acmp
2098   if (UseACmpProfile) {
2099     method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2100     if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2101       left_type = nullptr;
2102       right_type = nullptr;
2103       left_inline_type = true;
2104       right_inline_type = true;
2105     }
2106     if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2107       left_ptr = ProfileUnknownNull;
2108       right_ptr = ProfileUnknownNull;
2109     }
2110   }
2111 
2112   if (UseTypeSpeculation) {
2113     record_profile_for_speculation(left, left_type, left_ptr);
2114     record_profile_for_speculation(right, right_type, right_ptr);
2115   }
2116 
2117   if (!EnableValhalla) {
2118     Node* cmp = CmpP(left, right);
2119     cmp = optimize_cmp_with_klass(cmp);
2120     do_if(btest, cmp);
2121     return;
2122   }
2123 
2124   // Check for equality before potentially allocating
2125   if (left == right) {
2126     do_if(btest, makecon(TypeInt::CC_EQ));
2127     return;
2128   }
2129 
2130   // Allocate inline type operands and re-execute on deoptimization
2131   if (left->is_InlineType()) {
2132     if (_gvn.type(right)->is_zero_type() ||
2133         (right->is_InlineType() && _gvn.type(right->as_InlineType()->get_is_init())->is_zero_type())) {
2134       // Null checking a scalarized but nullable inline type. Check the IsInit
2135       // input instead of the oop input to avoid keeping buffer allocations alive.
2136       Node* cmp = CmpI(left->as_InlineType()->get_is_init(), intcon(0));
2137       do_if(btest, cmp);
2138       return;
2139     } else {
2140       PreserveReexecuteState preexecs(this);
2141       inc_sp(2);
2142       jvms()->set_should_reexecute(true);
2143       left = left->as_InlineType()->buffer(this)->get_oop();
2144     }
2145   }
2146   if (right->is_InlineType()) {
2147     PreserveReexecuteState preexecs(this);
2148     inc_sp(2);
2149     jvms()->set_should_reexecute(true);
2150     right = right->as_InlineType()->buffer(this)->get_oop();
2151   }
2152 
2153   // First, do a normal pointer comparison
2154   const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2155   const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2156   Node* cmp = CmpP(left, right);
2157   cmp = optimize_cmp_with_klass(cmp);
2158   if (tleft == nullptr || !tleft->can_be_inline_type() ||
2159       tright == nullptr || !tright->can_be_inline_type()) {
2160     // This is sufficient, if one of the operands can't be an inline type
2161     do_if(btest, cmp);
2162     return;
2163   }
2164 
2165   // Don't add traps to unstable if branches because additional checks are required to
2166   // decide if the operands are equal/substitutable and we therefore shouldn't prune
2167   // branches for one if based on the profiling of the acmp branches.
2168   // Also, OptimizeUnstableIf would set an incorrect re-rexecution state because it
2169   // assumes that there is a 1-1 mapping between the if and the acmp branches and that
2170   // hitting a trap means that we will take the corresponding acmp branch on re-execution.
2171   const bool can_trap = true;
2172 
2173   Node* eq_region = nullptr;
2174   if (btest == BoolTest::eq) {
2175     do_if(btest, cmp, !can_trap, true);
2176     if (stopped()) {
2177       // Pointers are equal, operands must be equal
2178       return;
2179     }
2180   } else {
2181     assert(btest == BoolTest::ne, "only eq or ne");
2182     Node* is_not_equal = nullptr;
2183     eq_region = new RegionNode(3);
2184     {
2185       PreserveJVMState pjvms(this);
2186       // Pointers are not equal, but more checks are needed to determine if the operands are (not) substitutable
2187       do_if(btest, cmp, !can_trap, false, &is_not_equal);
2188       if (!stopped()) {
2189         eq_region->init_req(1, control());
2190       }
2191     }
2192     if (is_not_equal == nullptr || is_not_equal->is_top()) {
2193       record_for_igvn(eq_region);
2194       set_control(_gvn.transform(eq_region));
2195       return;
2196     }
2197     set_control(is_not_equal);
2198   }
2199 
2200   // Prefer speculative types if available
2201   if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2202     if (tleft->speculative_type() != nullptr) {
2203       left_type = tleft->speculative_type();
2204     }
2205     if (tright->speculative_type() != nullptr) {
2206       right_type = tright->speculative_type();
2207     }
2208   }
2209 
2210   if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2211     ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2212     if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2213       left_ptr = speculative_left_ptr;
2214     } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2215       left_ptr = speculative_left_ptr;
2216     }
2217   }
2218   if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2219     ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2220     if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2221       right_ptr = speculative_right_ptr;
2222     } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2223       right_ptr = speculative_right_ptr;
2224     }
2225   }
2226 
2227   if (left_ptr == ProfileAlwaysNull) {
2228     // Comparison with null. Assert the input is indeed null and we're done.
2229     acmp_always_null_input(left, tleft, btest, eq_region);
2230     return;
2231   }
2232   if (right_ptr == ProfileAlwaysNull) {
2233     // Comparison with null. Assert the input is indeed null and we're done.
2234     acmp_always_null_input(right, tright, btest, eq_region);
2235     return;
2236   }
2237   if (left_type != nullptr && !left_type->is_inlinetype()) {
2238     // Comparison with an object of known type
2239     acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2240     return;
2241   }
2242   if (right_type != nullptr && !right_type->is_inlinetype()) {
2243     // Comparison with an object of known type
2244     acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2245     return;
2246   }
2247   if (!left_inline_type) {
2248     // Comparison with an object known not to be an inline type
2249     acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2250     return;
2251   }
2252   if (!right_inline_type) {
2253     // Comparison with an object known not to be an inline type
2254     acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2255     return;
2256   }
2257 
2258   // Pointers are not equal, check if first operand is non-null
2259   Node* ne_region = new RegionNode(6);
2260   Node* null_ctl;
2261   Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2262   ne_region->init_req(1, null_ctl);
2263 
2264   // First operand is non-null, check if it is an inline type
2265   Node* is_value = inline_type_test(not_null_right);
2266   IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2267   Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2268   ne_region->init_req(2, not_value);
2269   set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2270 
2271   // The first operand is an inline type, check if the second operand is non-null
2272   Node* not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2273   ne_region->init_req(3, null_ctl);
2274 
2275   // Check if both operands are of the same class.
2276   Node* kls_left = load_object_klass(not_null_left);
2277   Node* kls_right = load_object_klass(not_null_right);
2278   Node* kls_cmp = CmpP(kls_left, kls_right);
2279   Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2280   IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2281   Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2282   set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2283   ne_region->init_req(4, kls_ne);
2284 
2285   if (stopped()) {
2286     record_for_igvn(ne_region);
2287     set_control(_gvn.transform(ne_region));
2288     if (btest == BoolTest::ne) {
2289       {
2290         PreserveJVMState pjvms(this);
2291         int target_bci = iter().get_dest();
2292         merge(target_bci);
2293       }
2294       record_for_igvn(eq_region);
2295       set_control(_gvn.transform(eq_region));
2296     }
2297     return;
2298   }
2299 
2300   // Both operands are values types of the same class, we need to perform a
2301   // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
2302   Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2303   Node* mem = reset_memory();
2304   Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2305 
2306   Node* eq_io_phi = nullptr;
2307   Node* eq_mem_phi = nullptr;
2308   if (eq_region != nullptr) {
2309     eq_io_phi = PhiNode::make(eq_region, i_o());
2310     eq_mem_phi = PhiNode::make(eq_region, mem);
2311   }
2312 
2313   set_all_memory(mem);
2314 
2315   kill_dead_locals();
2316   ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(ciSymbols::isSubstitutable_name(), ciSymbols::object_object_boolean_signature());
2317   CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2318   call->set_override_symbolic_info(true);
2319   call->init_req(TypeFunc::Parms, not_null_left);
2320   call->init_req(TypeFunc::Parms+1, not_null_right);
2321   inc_sp(2);
2322   set_edges_for_java_call(call, false, false);
2323   Node* ret = set_results_for_java_call(call, false, true);
2324   dec_sp(2);
2325 
2326   // Test the return value of ValueObjectMethods::isSubstitutable()
2327   // This is the last check, do_if can emit traps now.
2328   Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2329   Node* ctl = C->top();
2330   if (btest == BoolTest::eq) {
2331     PreserveJVMState pjvms(this);
2332     do_if(btest, subst_cmp, can_trap);
2333     if (!stopped()) {
2334       ctl = control();
2335     }
2336   } else {
2337     assert(btest == BoolTest::ne, "only eq or ne");
2338     PreserveJVMState pjvms(this);
2339     do_if(btest, subst_cmp, can_trap, false, &ctl);
2340     if (!stopped()) {
2341       eq_region->init_req(2, control());
2342       eq_io_phi->init_req(2, i_o());
2343       eq_mem_phi->init_req(2, reset_memory());
2344     }
2345   }
2346   ne_region->init_req(5, ctl);
2347   ne_io_phi->init_req(5, i_o());
2348   ne_mem_phi->init_req(5, reset_memory());
2349 
2350   record_for_igvn(ne_region);
2351   set_control(_gvn.transform(ne_region));
2352   set_i_o(_gvn.transform(ne_io_phi));
2353   set_all_memory(_gvn.transform(ne_mem_phi));
2354 
2355   if (btest == BoolTest::ne) {
2356     {
2357       PreserveJVMState pjvms(this);
2358       int target_bci = iter().get_dest();
2359       merge(target_bci);
2360     }
2361 
2362     record_for_igvn(eq_region);
2363     set_control(_gvn.transform(eq_region));
2364     set_i_o(_gvn.transform(eq_io_phi));
2365     set_all_memory(_gvn.transform(eq_mem_phi));
2366   }
2367 }
2368 
2369 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
2370 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
2371 // then either takes the trap or executes the original, unstable if.
2372 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
2373   // Search for an unstable if trap
2374   CallStaticJavaNode* trap = nullptr;
2375   assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
2376   ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
2377   if (trap == nullptr || !trap->jvms()->should_reexecute()) {
2378     // No suitable trap found. Remove unused counter load and increment.
2379     C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
2380     return;
2381   }
2382 
2383   // Remove trap from optimization list since we add another path to the trap.
2384   bool success = C->remove_unstable_if_trap(trap, true);
2385   assert(success, "Trap already modified");
2386 
2387   // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
2388   int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
2389   Node* mask = intcon(right_n_bits(freq_log));
2390   counter = _gvn.transform(new AndINode(counter, mask));
2391   Node* cmp = _gvn.transform(new CmpINode(counter, intcon(0)));
2392   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::mask::eq));
2393   IfNode* iff = _gvn.transform(new IfNode(orig_iff->in(0), bol, orig_iff->_prob, orig_iff->_fcnt))->as_If();
2394   Node* if_true = _gvn.transform(new IfTrueNode(iff));
2395   Node* if_false = _gvn.transform(new IfFalseNode(iff));
2396   assert(!if_true->is_top() && !if_false->is_top(), "trap always / never taken");
2397 
2398   // Trap
2399   assert(trap_proj->outcnt() == 1, "some other nodes are dependent on the trap projection");
2400 
2401   Node* trap_region = new RegionNode(3);
2402   trap_region->set_req(1, trap_proj);
2403   trap_region->set_req(2, if_true);
2404   trap->set_req(0, _gvn.transform(trap_region));
2405 
2406   // Don't trap, execute original if
2407   orig_iff->set_req(0, if_false);
2408 }
2409 
2410 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
2411   // Randomly skip emitting an uncommon trap
2412   if (StressUnstableIfTraps && ((C->random() % 2) == 0)) {
2413     return false;
2414   }
2415   // Don't want to speculate on uncommon traps when running with -Xcomp
2416   if (!UseInterpreter) {
2417     return false;
2418   }
2419   return seems_never_taken(prob) &&
2420          !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
2421 }
2422 
2423 void Parse::maybe_add_predicate_after_if(Block* path) {
2424   if (path->is_SEL_head() && path->preds_parsed() == 0) {
2425     // Add predicates at bci of if dominating the loop so traps can be
2426     // recorded on the if's profile data
2427     int bc_depth = repush_if_args();
2428     add_parse_predicates();
2429     dec_sp(bc_depth);
2430     path->set_has_predicates();
2431   }
2432 }
2433 
2434 
2435 //----------------------------adjust_map_after_if------------------------------
2436 // Adjust the JVM state to reflect the result of taking this path.
2437 // Basically, it means inspecting the CmpNode controlling this
2438 // branch, seeing how it constrains a tested value, and then
2439 // deciding if it's worth our while to encode this constraint
2440 // as graph nodes in the current abstract interpretation map.
2441 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap) {
2442   if (!c->is_Cmp()) {
2443     maybe_add_predicate_after_if(path);
2444     return;
2445   }
2446 
2447   if (stopped() || btest == BoolTest::illegal) {
2448     return;                             // nothing to do
2449   }
2450 
2451   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2452 
2453   if (can_trap && path_is_suitable_for_uncommon_trap(prob)) {
2454     repush_if_args();
2455     Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2456                   Deoptimization::Action_reinterpret,
2457                   nullptr,
2458                   (is_fallthrough ? "taken always" : "taken never"));
2459 
2460     if (call != nullptr) {
2461       C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2462     }
2463     return;
2464   }
2465 
2466   Node* val = c->in(1);
2467   Node* con = c->in(2);
2468   const Type* tcon = _gvn.type(con);
2469   const Type* tval = _gvn.type(val);
2470   bool have_con = tcon->singleton();
2471   if (tval->singleton()) {
2472     if (!have_con) {
2473       // Swap, so constant is in con.
2474       con  = val;
2475       tcon = tval;
2476       val  = c->in(2);
2477       tval = _gvn.type(val);
2478       btest = BoolTest(btest).commute();
2479       have_con = true;
2480     } else {
2481       // Do we have two constants?  Then leave well enough alone.
2482       have_con = false;
2483     }
2484   }
2485   if (!have_con) {                        // remaining adjustments need a con
2486     maybe_add_predicate_after_if(path);
2487     return;
2488   }
2489 
2490   sharpen_type_after_if(btest, con, tcon, val, tval);
2491   maybe_add_predicate_after_if(path);
2492 }
2493 
2494 
2495 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
2496   Node* ldk;
2497   if (n->is_DecodeNKlass()) {
2498     if (n->in(1)->Opcode() != Op_LoadNKlass) {
2499       return nullptr;
2500     } else {
2501       ldk = n->in(1);
2502     }
2503   } else if (n->Opcode() != Op_LoadKlass) {
2504     return nullptr;
2505   } else {
2506     ldk = n;
2507   }
2508   assert(ldk != nullptr && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
2509 
2510   Node* adr = ldk->in(MemNode::Address);
2511   intptr_t off = 0;
2512   Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
2513   if (obj == nullptr || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
2514     return nullptr;
2515   const TypePtr* tp = gvn->type(obj)->is_ptr();
2516   if (tp == nullptr || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
2517     return nullptr;
2518 
2519   return obj;
2520 }
2521 
2522 void Parse::sharpen_type_after_if(BoolTest::mask btest,
2523                                   Node* con, const Type* tcon,
2524                                   Node* val, const Type* tval) {
2525   // Look for opportunities to sharpen the type of a node
2526   // whose klass is compared with a constant klass.
2527   if (btest == BoolTest::eq && tcon->isa_klassptr()) {
2528     Node* obj = extract_obj_from_klass_load(&_gvn, val);
2529     const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
2530     if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
2531        // Found:
2532        //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
2533        // or the narrowOop equivalent.
2534        const Type* obj_type = _gvn.type(obj);
2535        const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
2536        if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
2537            tboth->higher_equal(obj_type)) {
2538           // obj has to be of the exact type Foo if the CmpP succeeds.
2539           int obj_in_map = map()->find_edge(obj);
2540           JVMState* jvms = this->jvms();
2541           if (obj_in_map >= 0 &&
2542               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2543             TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2544             const Type* tcc = ccast->as_Type()->type();
2545             assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2546             // Delay transform() call to allow recovery of pre-cast value
2547             // at the control merge.
2548             _gvn.set_type_bottom(ccast);
2549             record_for_igvn(ccast);
2550             if (tboth->is_inlinetypeptr()) {
2551               ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->exact_klass(true)->as_inline_klass());
2552             }
2553             // Here's the payoff.
2554             replace_in_map(obj, ccast);
2555           }
2556        }
2557     }
2558   }
2559 
2560   int val_in_map = map()->find_edge(val);
2561   if (val_in_map < 0)  return;          // replace_in_map would be useless
2562   {
2563     JVMState* jvms = this->jvms();
2564     if (!(jvms->is_loc(val_in_map) ||
2565           jvms->is_stk(val_in_map)))
2566       return;                           // again, it would be useless
2567   }
2568 
2569   // Check for a comparison to a constant, and "know" that the compared
2570   // value is constrained on this path.
2571   assert(tcon->singleton(), "");
2572   ConstraintCastNode* ccast = nullptr;
2573   Node* cast = nullptr;
2574 
2575   switch (btest) {
2576   case BoolTest::eq:                    // Constant test?
2577     {
2578       const Type* tboth = tcon->join_speculative(tval);
2579       if (tboth == tval)  break;        // Nothing to gain.
2580       if (tcon->isa_int()) {
2581         ccast = new CastIINode(control(), val, tboth);
2582       } else if (tcon == TypePtr::NULL_PTR) {
2583         // Cast to null, but keep the pointer identity temporarily live.
2584         ccast = new CastPPNode(control(), val, tboth);
2585       } else {
2586         const TypeF* tf = tcon->isa_float_constant();
2587         const TypeD* td = tcon->isa_double_constant();
2588         // Exclude tests vs float/double 0 as these could be
2589         // either +0 or -0.  Just because you are equal to +0
2590         // doesn't mean you ARE +0!
2591         // Note, following code also replaces Long and Oop values.
2592         if ((!tf || tf->_f != 0.0) &&
2593             (!td || td->_d != 0.0))
2594           cast = con;                   // Replace non-constant val by con.
2595       }
2596     }
2597     break;
2598 
2599   case BoolTest::ne:
2600     if (tcon == TypePtr::NULL_PTR) {
2601       cast = cast_not_null(val, false);
2602     }
2603     break;
2604 
2605   default:
2606     // (At this point we could record int range types with CastII.)
2607     break;
2608   }
2609 
2610   if (ccast != nullptr) {
2611     const Type* tcc = ccast->as_Type()->type();
2612     assert(tcc != tval && tcc->higher_equal(tval), "must improve");
2613     // Delay transform() call to allow recovery of pre-cast value
2614     // at the control merge.
2615     _gvn.set_type_bottom(ccast);
2616     record_for_igvn(ccast);
2617     cast = ccast;
2618   }
2619 
2620   if (cast != nullptr) {                   // Here's the payoff.
2621     replace_in_map(val, cast);
2622   }
2623 }
2624 
2625 /**
2626  * Use speculative type to optimize CmpP node: if comparison is
2627  * against the low level class, cast the object to the speculative
2628  * type if any. CmpP should then go away.
2629  *
2630  * @param c  expected CmpP node
2631  * @return   result of CmpP on object casted to speculative type
2632  *
2633  */
2634 Node* Parse::optimize_cmp_with_klass(Node* c) {
2635   // If this is transformed by the _gvn to a comparison with the low
2636   // level klass then we may be able to use speculation
2637   if (c->Opcode() == Op_CmpP &&
2638       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2639       c->in(2)->is_Con()) {
2640     Node* load_klass = nullptr;
2641     Node* decode = nullptr;
2642     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2643       decode = c->in(1);
2644       load_klass = c->in(1)->in(1);
2645     } else {
2646       load_klass = c->in(1);
2647     }
2648     if (load_klass->in(2)->is_AddP()) {
2649       Node* addp = load_klass->in(2);
2650       Node* obj = addp->in(AddPNode::Address);
2651       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2652       if (obj_type->speculative_type_not_null() != nullptr) {
2653         ciKlass* k = obj_type->speculative_type();
2654         inc_sp(2);
2655         obj = maybe_cast_profiled_obj(obj, k);
2656         dec_sp(2);
2657         if (obj->is_InlineType()) {
2658           assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2659           obj = obj->as_InlineType()->get_oop();
2660         }
2661         // Make the CmpP use the casted obj
2662         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2663         load_klass = load_klass->clone();
2664         load_klass->set_req(2, addp);
2665         load_klass = _gvn.transform(load_klass);
2666         if (decode != nullptr) {
2667           decode = decode->clone();
2668           decode->set_req(1, load_klass);
2669           load_klass = _gvn.transform(decode);
2670         }
2671         c = c->clone();
2672         c->set_req(1, load_klass);
2673         c = _gvn.transform(c);
2674       }
2675     }
2676   }
2677   return c;
2678 }
2679 
2680 //------------------------------do_one_bytecode--------------------------------
2681 // Parse this bytecode, and alter the Parsers JVM->Node mapping
2682 void Parse::do_one_bytecode() {
2683   Node *a, *b, *c, *d;          // Handy temps
2684   BoolTest::mask btest;
2685   int i;
2686 
2687   assert(!has_exceptions(), "bytecode entry state must be clear of throws");
2688 
2689   if (C->check_node_count(NodeLimitFudgeFactor * 5,
2690                           "out of nodes parsing method")) {
2691     return;
2692   }
2693 
2694 #ifdef ASSERT
2695   // for setting breakpoints
2696   if (TraceOptoParse) {
2697     tty->print(" @");
2698     dump_bci(bci());
2699     tty->print(" %s", Bytecodes::name(bc()));
2700     tty->cr();
2701   }
2702 #endif
2703 
2704   switch (bc()) {
2705   case Bytecodes::_nop:
2706     // do nothing
2707     break;
2708   case Bytecodes::_lconst_0:
2709     push_pair(longcon(0));
2710     break;
2711 
2712   case Bytecodes::_lconst_1:
2713     push_pair(longcon(1));
2714     break;
2715 
2716   case Bytecodes::_fconst_0:
2717     push(zerocon(T_FLOAT));
2718     break;
2719 
2720   case Bytecodes::_fconst_1:
2721     push(makecon(TypeF::ONE));
2722     break;
2723 
2724   case Bytecodes::_fconst_2:
2725     push(makecon(TypeF::make(2.0f)));
2726     break;
2727 
2728   case Bytecodes::_dconst_0:
2729     push_pair(zerocon(T_DOUBLE));
2730     break;
2731 
2732   case Bytecodes::_dconst_1:
2733     push_pair(makecon(TypeD::ONE));
2734     break;
2735 
2736   case Bytecodes::_iconst_m1:push(intcon(-1)); break;
2737   case Bytecodes::_iconst_0: push(intcon( 0)); break;
2738   case Bytecodes::_iconst_1: push(intcon( 1)); break;
2739   case Bytecodes::_iconst_2: push(intcon( 2)); break;
2740   case Bytecodes::_iconst_3: push(intcon( 3)); break;
2741   case Bytecodes::_iconst_4: push(intcon( 4)); break;
2742   case Bytecodes::_iconst_5: push(intcon( 5)); break;
2743   case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
2744   case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
2745   case Bytecodes::_aconst_null: push(null());  break;
2746 
2747   case Bytecodes::_ldc:
2748   case Bytecodes::_ldc_w:
2749   case Bytecodes::_ldc2_w: {
2750     // ciTypeFlow should trap if the ldc is in error state or if the constant is not loaded
2751     assert(!iter().is_in_error(), "ldc is in error state");
2752     ciConstant constant = iter().get_constant();
2753     assert(constant.is_loaded(), "constant is not loaded");
2754     const Type* con_type = Type::make_from_constant(constant);
2755     if (con_type != nullptr) {
2756       push_node(con_type->basic_type(), makecon(con_type));
2757     }
2758     break;
2759   }
2760 
2761   case Bytecodes::_aload_0:
2762     push( local(0) );
2763     break;
2764   case Bytecodes::_aload_1:
2765     push( local(1) );
2766     break;
2767   case Bytecodes::_aload_2:
2768     push( local(2) );
2769     break;
2770   case Bytecodes::_aload_3:
2771     push( local(3) );
2772     break;
2773   case Bytecodes::_aload:
2774     push( local(iter().get_index()) );
2775     break;
2776 
2777   case Bytecodes::_fload_0:
2778   case Bytecodes::_iload_0:
2779     push( local(0) );
2780     break;
2781   case Bytecodes::_fload_1:
2782   case Bytecodes::_iload_1:
2783     push( local(1) );
2784     break;
2785   case Bytecodes::_fload_2:
2786   case Bytecodes::_iload_2:
2787     push( local(2) );
2788     break;
2789   case Bytecodes::_fload_3:
2790   case Bytecodes::_iload_3:
2791     push( local(3) );
2792     break;
2793   case Bytecodes::_fload:
2794   case Bytecodes::_iload:
2795     push( local(iter().get_index()) );
2796     break;
2797   case Bytecodes::_lload_0:
2798     push_pair_local( 0 );
2799     break;
2800   case Bytecodes::_lload_1:
2801     push_pair_local( 1 );
2802     break;
2803   case Bytecodes::_lload_2:
2804     push_pair_local( 2 );
2805     break;
2806   case Bytecodes::_lload_3:
2807     push_pair_local( 3 );
2808     break;
2809   case Bytecodes::_lload:
2810     push_pair_local( iter().get_index() );
2811     break;
2812 
2813   case Bytecodes::_dload_0:
2814     push_pair_local(0);
2815     break;
2816   case Bytecodes::_dload_1:
2817     push_pair_local(1);
2818     break;
2819   case Bytecodes::_dload_2:
2820     push_pair_local(2);
2821     break;
2822   case Bytecodes::_dload_3:
2823     push_pair_local(3);
2824     break;
2825   case Bytecodes::_dload:
2826     push_pair_local(iter().get_index());
2827     break;
2828   case Bytecodes::_fstore_0:
2829   case Bytecodes::_istore_0:
2830   case Bytecodes::_astore_0:
2831     set_local( 0, pop() );
2832     break;
2833   case Bytecodes::_fstore_1:
2834   case Bytecodes::_istore_1:
2835   case Bytecodes::_astore_1:
2836     set_local( 1, pop() );
2837     break;
2838   case Bytecodes::_fstore_2:
2839   case Bytecodes::_istore_2:
2840   case Bytecodes::_astore_2:
2841     set_local( 2, pop() );
2842     break;
2843   case Bytecodes::_fstore_3:
2844   case Bytecodes::_istore_3:
2845   case Bytecodes::_astore_3:
2846     set_local( 3, pop() );
2847     break;
2848   case Bytecodes::_fstore:
2849   case Bytecodes::_istore:
2850   case Bytecodes::_astore:
2851     set_local( iter().get_index(), pop() );
2852     break;
2853   // long stores
2854   case Bytecodes::_lstore_0:
2855     set_pair_local( 0, pop_pair() );
2856     break;
2857   case Bytecodes::_lstore_1:
2858     set_pair_local( 1, pop_pair() );
2859     break;
2860   case Bytecodes::_lstore_2:
2861     set_pair_local( 2, pop_pair() );
2862     break;
2863   case Bytecodes::_lstore_3:
2864     set_pair_local( 3, pop_pair() );
2865     break;
2866   case Bytecodes::_lstore:
2867     set_pair_local( iter().get_index(), pop_pair() );
2868     break;
2869 
2870   // double stores
2871   case Bytecodes::_dstore_0:
2872     set_pair_local( 0, pop_pair() );
2873     break;
2874   case Bytecodes::_dstore_1:
2875     set_pair_local( 1, pop_pair() );
2876     break;
2877   case Bytecodes::_dstore_2:
2878     set_pair_local( 2, pop_pair() );
2879     break;
2880   case Bytecodes::_dstore_3:
2881     set_pair_local( 3, pop_pair() );
2882     break;
2883   case Bytecodes::_dstore:
2884     set_pair_local( iter().get_index(), pop_pair() );
2885     break;
2886 
2887   case Bytecodes::_pop:  dec_sp(1);   break;
2888   case Bytecodes::_pop2: dec_sp(2);   break;
2889   case Bytecodes::_swap:
2890     a = pop();
2891     b = pop();
2892     push(a);
2893     push(b);
2894     break;
2895   case Bytecodes::_dup:
2896     a = pop();
2897     push(a);
2898     push(a);
2899     break;
2900   case Bytecodes::_dup_x1:
2901     a = pop();
2902     b = pop();
2903     push( a );
2904     push( b );
2905     push( a );
2906     break;
2907   case Bytecodes::_dup_x2:
2908     a = pop();
2909     b = pop();
2910     c = pop();
2911     push( a );
2912     push( c );
2913     push( b );
2914     push( a );
2915     break;
2916   case Bytecodes::_dup2:
2917     a = pop();
2918     b = pop();
2919     push( b );
2920     push( a );
2921     push( b );
2922     push( a );
2923     break;
2924 
2925   case Bytecodes::_dup2_x1:
2926     // before: .. c, b, a
2927     // after:  .. b, a, c, b, a
2928     // not tested
2929     a = pop();
2930     b = pop();
2931     c = pop();
2932     push( b );
2933     push( a );
2934     push( c );
2935     push( b );
2936     push( a );
2937     break;
2938   case Bytecodes::_dup2_x2:
2939     // before: .. d, c, b, a
2940     // after:  .. b, a, d, c, b, a
2941     // not tested
2942     a = pop();
2943     b = pop();
2944     c = pop();
2945     d = pop();
2946     push( b );
2947     push( a );
2948     push( d );
2949     push( c );
2950     push( b );
2951     push( a );
2952     break;
2953 
2954   case Bytecodes::_arraylength: {
2955     // Must do null-check with value on expression stack
2956     Node *ary = null_check(peek(), T_ARRAY);
2957     // Compile-time detect of null-exception?
2958     if (stopped())  return;
2959     a = pop();
2960     push(load_array_length(a));
2961     break;
2962   }
2963 
2964   case Bytecodes::_baload:  array_load(T_BYTE);    break;
2965   case Bytecodes::_caload:  array_load(T_CHAR);    break;
2966   case Bytecodes::_iaload:  array_load(T_INT);     break;
2967   case Bytecodes::_saload:  array_load(T_SHORT);   break;
2968   case Bytecodes::_faload:  array_load(T_FLOAT);   break;
2969   case Bytecodes::_aaload:  array_load(T_OBJECT);  break;
2970   case Bytecodes::_laload:  array_load(T_LONG);    break;
2971   case Bytecodes::_daload:  array_load(T_DOUBLE);  break;
2972   case Bytecodes::_bastore: array_store(T_BYTE);   break;
2973   case Bytecodes::_castore: array_store(T_CHAR);   break;
2974   case Bytecodes::_iastore: array_store(T_INT);    break;
2975   case Bytecodes::_sastore: array_store(T_SHORT);  break;
2976   case Bytecodes::_fastore: array_store(T_FLOAT);  break;
2977   case Bytecodes::_aastore: array_store(T_OBJECT); break;
2978   case Bytecodes::_lastore: array_store(T_LONG);   break;
2979   case Bytecodes::_dastore: array_store(T_DOUBLE); break;
2980 
2981   case Bytecodes::_getfield:
2982     do_getfield();
2983     break;
2984 
2985   case Bytecodes::_getstatic:
2986     do_getstatic();
2987     break;
2988 
2989   case Bytecodes::_putfield:
2990     do_putfield();
2991     break;
2992 
2993   case Bytecodes::_putstatic:
2994     do_putstatic();
2995     break;
2996 
2997   case Bytecodes::_irem:
2998     // Must keep both values on the expression-stack during null-check
2999     zero_check_int(peek());
3000     // Compile-time detect of null-exception?
3001     if (stopped())  return;
3002     b = pop();
3003     a = pop();
3004     push(_gvn.transform(new ModINode(control(), a, b)));
3005     break;
3006   case Bytecodes::_idiv:
3007     // Must keep both values on the expression-stack during null-check
3008     zero_check_int(peek());
3009     // Compile-time detect of null-exception?
3010     if (stopped())  return;
3011     b = pop();
3012     a = pop();
3013     push( _gvn.transform( new DivINode(control(),a,b) ) );
3014     break;
3015   case Bytecodes::_imul:
3016     b = pop(); a = pop();
3017     push( _gvn.transform( new MulINode(a,b) ) );
3018     break;
3019   case Bytecodes::_iadd:
3020     b = pop(); a = pop();
3021     push( _gvn.transform( new AddINode(a,b) ) );
3022     break;
3023   case Bytecodes::_ineg:
3024     a = pop();
3025     push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
3026     break;
3027   case Bytecodes::_isub:
3028     b = pop(); a = pop();
3029     push( _gvn.transform( new SubINode(a,b) ) );
3030     break;
3031   case Bytecodes::_iand:
3032     b = pop(); a = pop();
3033     push( _gvn.transform( new AndINode(a,b) ) );
3034     break;
3035   case Bytecodes::_ior:
3036     b = pop(); a = pop();
3037     push( _gvn.transform( new OrINode(a,b) ) );
3038     break;
3039   case Bytecodes::_ixor:
3040     b = pop(); a = pop();
3041     push( _gvn.transform( new XorINode(a,b) ) );
3042     break;
3043   case Bytecodes::_ishl:
3044     b = pop(); a = pop();
3045     push( _gvn.transform( new LShiftINode(a,b) ) );
3046     break;
3047   case Bytecodes::_ishr:
3048     b = pop(); a = pop();
3049     push( _gvn.transform( new RShiftINode(a,b) ) );
3050     break;
3051   case Bytecodes::_iushr:
3052     b = pop(); a = pop();
3053     push( _gvn.transform( new URShiftINode(a,b) ) );
3054     break;
3055 
3056   case Bytecodes::_fneg:
3057     a = pop();
3058     b = _gvn.transform(new NegFNode (a));
3059     push(b);
3060     break;
3061 
3062   case Bytecodes::_fsub:
3063     b = pop();
3064     a = pop();
3065     c = _gvn.transform( new SubFNode(a,b) );
3066     push(c);
3067     break;
3068 
3069   case Bytecodes::_fadd:
3070     b = pop();
3071     a = pop();
3072     c = _gvn.transform( new AddFNode(a,b) );
3073     push(c);
3074     break;
3075 
3076   case Bytecodes::_fmul:
3077     b = pop();
3078     a = pop();
3079     c = _gvn.transform( new MulFNode(a,b) );
3080     push(c);
3081     break;
3082 
3083   case Bytecodes::_fdiv:
3084     b = pop();
3085     a = pop();
3086     c = _gvn.transform( new DivFNode(nullptr,a,b) );
3087     push(c);
3088     break;
3089 
3090   case Bytecodes::_frem:
3091     // Generate a ModF node.
3092     b = pop();
3093     a = pop();
3094     push(floating_point_mod(a, b, BasicType::T_FLOAT));
3095     break;
3096 
3097   case Bytecodes::_fcmpl:
3098     b = pop();
3099     a = pop();
3100     c = _gvn.transform( new CmpF3Node( a, b));
3101     push(c);
3102     break;
3103   case Bytecodes::_fcmpg:
3104     b = pop();
3105     a = pop();
3106 
3107     // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
3108     // which negates the result sign except for unordered.  Flip the unordered
3109     // as well by using CmpF3 which implements unordered-lesser instead of
3110     // unordered-greater semantics.  Finally, commute the result bits.  Result
3111     // is same as using a CmpF3Greater except we did it with CmpF3 alone.
3112     c = _gvn.transform( new CmpF3Node( b, a));
3113     c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3114     push(c);
3115     break;
3116 
3117   case Bytecodes::_f2i:
3118     a = pop();
3119     push(_gvn.transform(new ConvF2INode(a)));
3120     break;
3121 
3122   case Bytecodes::_d2i:
3123     a = pop_pair();
3124     b = _gvn.transform(new ConvD2INode(a));
3125     push( b );
3126     break;
3127 
3128   case Bytecodes::_f2d:
3129     a = pop();
3130     b = _gvn.transform( new ConvF2DNode(a));
3131     push_pair( b );
3132     break;
3133 
3134   case Bytecodes::_d2f:
3135     a = pop_pair();
3136     b = _gvn.transform( new ConvD2FNode(a));
3137     push( b );
3138     break;
3139 
3140   case Bytecodes::_l2f:
3141     if (Matcher::convL2FSupported()) {
3142       a = pop_pair();
3143       b = _gvn.transform( new ConvL2FNode(a));
3144       push(b);
3145     } else {
3146       l2f();
3147     }
3148     break;
3149 
3150   case Bytecodes::_l2d:
3151     a = pop_pair();
3152     b = _gvn.transform( new ConvL2DNode(a));
3153     push_pair(b);
3154     break;
3155 
3156   case Bytecodes::_f2l:
3157     a = pop();
3158     b = _gvn.transform( new ConvF2LNode(a));
3159     push_pair(b);
3160     break;
3161 
3162   case Bytecodes::_d2l:
3163     a = pop_pair();
3164     b = _gvn.transform( new ConvD2LNode(a));
3165     push_pair(b);
3166     break;
3167 
3168   case Bytecodes::_dsub:
3169     b = pop_pair();
3170     a = pop_pair();
3171     c = _gvn.transform( new SubDNode(a,b) );
3172     push_pair(c);
3173     break;
3174 
3175   case Bytecodes::_dadd:
3176     b = pop_pair();
3177     a = pop_pair();
3178     c = _gvn.transform( new AddDNode(a,b) );
3179     push_pair(c);
3180     break;
3181 
3182   case Bytecodes::_dmul:
3183     b = pop_pair();
3184     a = pop_pair();
3185     c = _gvn.transform( new MulDNode(a,b) );
3186     push_pair(c);
3187     break;
3188 
3189   case Bytecodes::_ddiv:
3190     b = pop_pair();
3191     a = pop_pair();
3192     c = _gvn.transform( new DivDNode(nullptr,a,b) );
3193     push_pair(c);
3194     break;
3195 
3196   case Bytecodes::_dneg:
3197     a = pop_pair();
3198     b = _gvn.transform(new NegDNode (a));
3199     push_pair(b);
3200     break;
3201 
3202   case Bytecodes::_drem:
3203     // Generate a ModD node.
3204     b = pop_pair();
3205     a = pop_pair();
3206     push_pair(floating_point_mod(a, b, BasicType::T_DOUBLE));
3207     break;
3208 
3209   case Bytecodes::_dcmpl:
3210     b = pop_pair();
3211     a = pop_pair();
3212     c = _gvn.transform( new CmpD3Node( a, b));
3213     push(c);
3214     break;
3215 
3216   case Bytecodes::_dcmpg:
3217     b = pop_pair();
3218     a = pop_pair();
3219     // Same as dcmpl but need to flip the unordered case.
3220     // Commute the inputs, which negates the result sign except for unordered.
3221     // Flip the unordered as well by using CmpD3 which implements
3222     // unordered-lesser instead of unordered-greater semantics.
3223     // Finally, negate the result bits.  Result is same as using a
3224     // CmpD3Greater except we did it with CmpD3 alone.
3225     c = _gvn.transform( new CmpD3Node( b, a));
3226     c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3227     push(c);
3228     break;
3229 
3230 
3231     // Note for longs -> lo word is on TOS, hi word is on TOS - 1
3232   case Bytecodes::_land:
3233     b = pop_pair();
3234     a = pop_pair();
3235     c = _gvn.transform( new AndLNode(a,b) );
3236     push_pair(c);
3237     break;
3238   case Bytecodes::_lor:
3239     b = pop_pair();
3240     a = pop_pair();
3241     c = _gvn.transform( new OrLNode(a,b) );
3242     push_pair(c);
3243     break;
3244   case Bytecodes::_lxor:
3245     b = pop_pair();
3246     a = pop_pair();
3247     c = _gvn.transform( new XorLNode(a,b) );
3248     push_pair(c);
3249     break;
3250 
3251   case Bytecodes::_lshl:
3252     b = pop();                  // the shift count
3253     a = pop_pair();             // value to be shifted
3254     c = _gvn.transform( new LShiftLNode(a,b) );
3255     push_pair(c);
3256     break;
3257   case Bytecodes::_lshr:
3258     b = pop();                  // the shift count
3259     a = pop_pair();             // value to be shifted
3260     c = _gvn.transform( new RShiftLNode(a,b) );
3261     push_pair(c);
3262     break;
3263   case Bytecodes::_lushr:
3264     b = pop();                  // the shift count
3265     a = pop_pair();             // value to be shifted
3266     c = _gvn.transform( new URShiftLNode(a,b) );
3267     push_pair(c);
3268     break;
3269   case Bytecodes::_lmul:
3270     b = pop_pair();
3271     a = pop_pair();
3272     c = _gvn.transform( new MulLNode(a,b) );
3273     push_pair(c);
3274     break;
3275 
3276   case Bytecodes::_lrem:
3277     // Must keep both values on the expression-stack during null-check
3278     assert(peek(0) == top(), "long word order");
3279     zero_check_long(peek(1));
3280     // Compile-time detect of null-exception?
3281     if (stopped())  return;
3282     b = pop_pair();
3283     a = pop_pair();
3284     c = _gvn.transform( new ModLNode(control(),a,b) );
3285     push_pair(c);
3286     break;
3287 
3288   case Bytecodes::_ldiv:
3289     // Must keep both values on the expression-stack during null-check
3290     assert(peek(0) == top(), "long word order");
3291     zero_check_long(peek(1));
3292     // Compile-time detect of null-exception?
3293     if (stopped())  return;
3294     b = pop_pair();
3295     a = pop_pair();
3296     c = _gvn.transform( new DivLNode(control(),a,b) );
3297     push_pair(c);
3298     break;
3299 
3300   case Bytecodes::_ladd:
3301     b = pop_pair();
3302     a = pop_pair();
3303     c = _gvn.transform( new AddLNode(a,b) );
3304     push_pair(c);
3305     break;
3306   case Bytecodes::_lsub:
3307     b = pop_pair();
3308     a = pop_pair();
3309     c = _gvn.transform( new SubLNode(a,b) );
3310     push_pair(c);
3311     break;
3312   case Bytecodes::_lcmp:
3313     // Safepoints are now inserted _before_ branches.  The long-compare
3314     // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
3315     // slew of control flow.  These are usually followed by a CmpI vs zero and
3316     // a branch; this pattern then optimizes to the obvious long-compare and
3317     // branch.  However, if the branch is backwards there's a Safepoint
3318     // inserted.  The inserted Safepoint captures the JVM state at the
3319     // pre-branch point, i.e. it captures the 3-way value.  Thus if a
3320     // long-compare is used to control a loop the debug info will force
3321     // computation of the 3-way value, even though the generated code uses a
3322     // long-compare and branch.  We try to rectify the situation by inserting
3323     // a SafePoint here and have it dominate and kill the safepoint added at a
3324     // following backwards branch.  At this point the JVM state merely holds 2
3325     // longs but not the 3-way value.
3326     switch (iter().next_bc()) {
3327       case Bytecodes::_ifgt:
3328       case Bytecodes::_iflt:
3329       case Bytecodes::_ifge:
3330       case Bytecodes::_ifle:
3331       case Bytecodes::_ifne:
3332       case Bytecodes::_ifeq:
3333         // If this is a backwards branch in the bytecodes, add Safepoint
3334         maybe_add_safepoint(iter().next_get_dest());
3335       default:
3336         break;
3337     }
3338     b = pop_pair();
3339     a = pop_pair();
3340     c = _gvn.transform( new CmpL3Node( a, b ));
3341     push(c);
3342     break;
3343 
3344   case Bytecodes::_lneg:
3345     a = pop_pair();
3346     b = _gvn.transform( new SubLNode(longcon(0),a));
3347     push_pair(b);
3348     break;
3349   case Bytecodes::_l2i:
3350     a = pop_pair();
3351     push( _gvn.transform( new ConvL2INode(a)));
3352     break;
3353   case Bytecodes::_i2l:
3354     a = pop();
3355     b = _gvn.transform( new ConvI2LNode(a));
3356     push_pair(b);
3357     break;
3358   case Bytecodes::_i2b:
3359     // Sign extend
3360     a = pop();
3361     a = Compile::narrow_value(T_BYTE, a, nullptr, &_gvn, true);
3362     push(a);
3363     break;
3364   case Bytecodes::_i2s:
3365     a = pop();
3366     a = Compile::narrow_value(T_SHORT, a, nullptr, &_gvn, true);
3367     push(a);
3368     break;
3369   case Bytecodes::_i2c:
3370     a = pop();
3371     a = Compile::narrow_value(T_CHAR, a, nullptr, &_gvn, true);
3372     push(a);
3373     break;
3374 
3375   case Bytecodes::_i2f:
3376     a = pop();
3377     b = _gvn.transform( new ConvI2FNode(a) ) ;
3378     push(b);
3379     break;
3380 
3381   case Bytecodes::_i2d:
3382     a = pop();
3383     b = _gvn.transform( new ConvI2DNode(a));
3384     push_pair(b);
3385     break;
3386 
3387   case Bytecodes::_iinc:        // Increment local
3388     i = iter().get_index();     // Get local index
3389     set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
3390     break;
3391 
3392   // Exit points of synchronized methods must have an unlock node
3393   case Bytecodes::_return:
3394     return_current(nullptr);
3395     break;
3396 
3397   case Bytecodes::_ireturn:
3398   case Bytecodes::_areturn:
3399   case Bytecodes::_freturn:
3400     return_current(pop());
3401     break;
3402   case Bytecodes::_lreturn:
3403     return_current(pop_pair());
3404     break;
3405   case Bytecodes::_dreturn:
3406     return_current(pop_pair());
3407     break;
3408 
3409   case Bytecodes::_athrow:
3410     // null exception oop throws null pointer exception
3411     null_check(peek());
3412     if (stopped())  return;
3413     // Hook the thrown exception directly to subsequent handlers.
3414     if (BailoutToInterpreterForThrows) {
3415       // Keep method interpreted from now on.
3416       uncommon_trap(Deoptimization::Reason_unhandled,
3417                     Deoptimization::Action_make_not_compilable);
3418       return;
3419     }
3420     if (env()->jvmti_can_post_on_exceptions()) {
3421       // check if we must post exception events, take uncommon trap if so (with must_throw = false)
3422       uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
3423     }
3424     // Here if either can_post_on_exceptions or should_post_on_exceptions is false
3425     add_exception_state(make_exception_state(peek()));
3426     break;
3427 
3428   case Bytecodes::_goto:   // fall through
3429   case Bytecodes::_goto_w: {
3430     int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
3431 
3432     // If this is a backwards branch in the bytecodes, add Safepoint
3433     maybe_add_safepoint(target_bci);
3434 
3435     // Merge the current control into the target basic block
3436     merge(target_bci);
3437 
3438     // See if we can get some profile data and hand it off to the next block
3439     Block *target_block = block()->successor_for_bci(target_bci);
3440     if (target_block->pred_count() != 1)  break;
3441     ciMethodData* methodData = method()->method_data();
3442     if (!methodData->is_mature())  break;
3443     ciProfileData* data = methodData->bci_to_data(bci());
3444     assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3445     int taken = ((ciJumpData*)data)->taken();
3446     taken = method()->scale_count(taken);
3447     target_block->set_count(taken);
3448     break;
3449   }
3450 
3451   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
3452   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3453   handle_if_null:
3454     // If this is a backwards branch in the bytecodes, add Safepoint
3455     maybe_add_safepoint(iter().get_dest());
3456     a = null();
3457     b = pop();
3458     if (b->is_InlineType()) {
3459       // Null checking a scalarized but nullable inline type. Check the IsInit
3460       // input instead of the oop input to avoid keeping buffer allocations alive
3461       c = _gvn.transform(new CmpINode(b->as_InlineType()->get_is_init(), zerocon(T_INT)));
3462     } else {
3463       if (!_gvn.type(b)->speculative_maybe_null() &&
3464           !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3465         inc_sp(1);
3466         Node* null_ctl = top();
3467         b = null_check_oop(b, &null_ctl, true, true, true);
3468         assert(null_ctl->is_top(), "no null control here");
3469         dec_sp(1);
3470       } else if (_gvn.type(b)->speculative_always_null() &&
3471                  !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3472         inc_sp(1);
3473         b = null_assert(b);
3474         dec_sp(1);
3475       }
3476       c = _gvn.transform( new CmpPNode(b, a) );
3477     }
3478     do_ifnull(btest, c);
3479     break;
3480 
3481   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3482   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3483   handle_if_acmp:
3484     // If this is a backwards branch in the bytecodes, add Safepoint
3485     maybe_add_safepoint(iter().get_dest());
3486     a = pop();
3487     b = pop();
3488     do_acmp(btest, b, a);
3489     break;
3490 
3491   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3492   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3493   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3494   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3495   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3496   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3497   handle_ifxx:
3498     // If this is a backwards branch in the bytecodes, add Safepoint
3499     maybe_add_safepoint(iter().get_dest());
3500     a = _gvn.intcon(0);
3501     b = pop();
3502     c = _gvn.transform( new CmpINode(b, a) );
3503     do_if(btest, c);
3504     break;
3505 
3506   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3507   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3508   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3509   case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
3510   case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
3511   case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
3512   handle_if_icmp:
3513     // If this is a backwards branch in the bytecodes, add Safepoint
3514     maybe_add_safepoint(iter().get_dest());
3515     a = pop();
3516     b = pop();
3517     c = _gvn.transform( new CmpINode( b, a ) );
3518     do_if(btest, c);
3519     break;
3520 
3521   case Bytecodes::_tableswitch:
3522     do_tableswitch();
3523     break;
3524 
3525   case Bytecodes::_lookupswitch:
3526     do_lookupswitch();
3527     break;
3528 
3529   case Bytecodes::_invokestatic:
3530   case Bytecodes::_invokedynamic:
3531   case Bytecodes::_invokespecial:
3532   case Bytecodes::_invokevirtual:
3533   case Bytecodes::_invokeinterface:
3534     do_call();
3535     break;
3536   case Bytecodes::_checkcast:
3537     do_checkcast();
3538     break;
3539   case Bytecodes::_instanceof:
3540     do_instanceof();
3541     break;
3542   case Bytecodes::_anewarray:
3543     do_newarray();
3544     break;
3545   case Bytecodes::_newarray:
3546     do_newarray((BasicType)iter().get_index());
3547     break;
3548   case Bytecodes::_multianewarray:
3549     do_multianewarray();
3550     break;
3551   case Bytecodes::_new:
3552     do_new();
3553     break;
3554 
3555   case Bytecodes::_jsr:
3556   case Bytecodes::_jsr_w:
3557     do_jsr();
3558     break;
3559 
3560   case Bytecodes::_ret:
3561     do_ret();
3562     break;
3563 
3564 
3565   case Bytecodes::_monitorenter:
3566     do_monitor_enter();
3567     break;
3568 
3569   case Bytecodes::_monitorexit:
3570     do_monitor_exit();
3571     break;
3572 
3573   case Bytecodes::_breakpoint:
3574     // Breakpoint set concurrently to compile
3575     // %%% use an uncommon trap?
3576     C->record_failure("breakpoint in method");
3577     return;
3578 
3579   default:
3580 #ifndef PRODUCT
3581     map()->dump(99);
3582 #endif
3583     tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
3584     ShouldNotReachHere();
3585   }
3586 
3587 #ifndef PRODUCT
3588   if (failing()) { return; }
3589   constexpr int perBytecode = 6;
3590   if (C->should_print_igv(perBytecode)) {
3591     IdealGraphPrinter* printer = C->igv_printer();
3592     char buffer[256];
3593     jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
3594     bool old = printer->traverse_outs();
3595     printer->set_traverse_outs(true);
3596     printer->print_graph(buffer);
3597     printer->set_traverse_outs(old);
3598   }
3599 #endif
3600 }