1 /*
   2  * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "ci/ciSymbols.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "interpreter/linkResolver.hpp"
  31 #include "jvm_io.h"
  32 #include "memory/resourceArea.hpp"
  33 #include "memory/universe.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "opto/addnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/convertnode.hpp"
  38 #include "opto/divnode.hpp"
  39 #include "opto/idealGraphPrinter.hpp"
  40 #include "opto/idealKit.hpp"
  41 #include "opto/inlinetypenode.hpp"
  42 #include "opto/matcher.hpp"
  43 #include "opto/memnode.hpp"
  44 #include "opto/mulnode.hpp"
  45 #include "opto/opaquenode.hpp"
  46 #include "opto/parse.hpp"
  47 #include "opto/runtime.hpp"
  48 #include "runtime/deoptimization.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 
  51 #ifndef PRODUCT
  52 extern int explicit_null_checks_inserted,
  53            explicit_null_checks_elided;
  54 #endif
  55 
  56 Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
  57   // Feed unused profile data to type speculation
  58   if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
  59     ciKlass* array_type = NULL;
  60     ciKlass* element_type = NULL;
  61     ProfilePtrKind element_ptr = ProfileMaybeNull;
  62     bool flat_array = true;
  63     bool null_free_array = true;
  64     method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
  65     if (element_type != NULL || element_ptr != ProfileMaybeNull) {
  66       ld = record_profile_for_speculation(ld, element_type, element_ptr);
  67     }
  68   }
  69   return ld;
  70 }
  71 
  72 
  73 //---------------------------------array_load----------------------------------
  74 void Parse::array_load(BasicType bt) {
  75   const Type* elemtype = Type::TOP;
  76   Node* adr = array_addressing(bt, 0, elemtype);
  77   if (stopped())  return;     // guaranteed null or range check
  78 
  79   Node* idx = pop();
  80   Node* ary = pop();
  81 
  82   // Handle inline type arrays
  83   const TypeOopPtr* elemptr = elemtype->make_oopptr();
  84   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  85   if (ary_t->is_flat()) {
  86     // Load from flattened inline type array
  87     Node* vt = InlineTypeNode::make_from_flattened(this, elemtype->inline_klass(), ary, adr);
  88     push(vt);
  89     return;
  90   } else if (ary_t->is_null_free()) {
  91     // Load from non-flattened inline type array (elements can never be null)
  92     bt = T_PRIMITIVE_OBJECT;
  93   } else if (!ary_t->is_not_flat()) {
  94     // Cannot statically determine if array is flattened, emit runtime check
  95     assert(UseFlatArray && is_reference_type(bt) && elemptr->can_be_inline_type() && !ary_t->klass_is_exact() && !ary_t->is_not_null_free() &&
  96            (!elemptr->is_inlinetypeptr() || elemptr->inline_klass()->flatten_array()), "array can't be flattened");
  97     IdealKit ideal(this);
  98     IdealVariable res(ideal);
  99     ideal.declarations_done();
 100     ideal.if_then(flat_array_test(ary, /* flat = */ false)); {
 101       // non-flattened
 102       assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
 103       sync_kit(ideal);
 104       const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 105       Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
 106                                 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
 107       if (elemptr->is_inlinetypeptr()) {
 108         assert(elemptr->maybe_null(), "null free array should be handled above");
 109         ld = InlineTypeNode::make_from_oop(this, ld, elemptr->inline_klass(), false);
 110       }
 111       ideal.sync_kit(this);
 112       ideal.set(res, ld);
 113     } ideal.else_(); {
 114       // flattened
 115       sync_kit(ideal);
 116       if (elemptr->is_inlinetypeptr()) {
 117         // Element type is known, cast and load from flattened representation
 118         ciInlineKlass* vk = elemptr->inline_klass();
 119         assert(vk->flatten_array() && elemptr->maybe_null(), "never/always flat - should be optimized");
 120         ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
 121         const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 122         Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
 123         Node* casted_adr = array_element_address(cast, idx, T_PRIMITIVE_OBJECT, ary_t->size(), control());
 124         // Re-execute flattened array load if buffering triggers deoptimization
 125         PreserveReexecuteState preexecs(this);
 126         jvms()->set_should_reexecute(true);
 127         inc_sp(2);
 128         Node* vt = InlineTypeNode::make_from_flattened(this, vk, cast, casted_adr)->buffer(this, false);
 129         ideal.set(res, vt);
 130         ideal.sync_kit(this);
 131       } else {
 132         // Element type is unknown, emit runtime call
 133 
 134         // Below membars keep this access to an unknown flattened array correctly
 135         // ordered with other unknown and known flattened array accesses.
 136         insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 137 
 138         Node* call = NULL;
 139         {
 140           // Re-execute flattened array load if runtime call triggers deoptimization
 141           PreserveReexecuteState preexecs(this);
 142           jvms()->set_bci(_bci);
 143           jvms()->set_should_reexecute(true);
 144           inc_sp(2);
 145           kill_dead_locals();
 146           call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 147                                    OptoRuntime::load_unknown_inline_type(),
 148                                    OptoRuntime::load_unknown_inline_Java(),
 149                                    NULL, TypeRawPtr::BOTTOM,
 150                                    ary, idx);
 151         }
 152         make_slow_call_ex(call, env()->Throwable_klass(), false);
 153         Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
 154 
 155         insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 156 
 157         // Keep track of the information that the inline type is flattened in arrays
 158         const Type* unknown_value = elemptr->is_instptr()->cast_to_flatten_array();
 159         buffer = _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
 160 
 161         ideal.sync_kit(this);
 162         ideal.set(res, buffer);
 163       }
 164     } ideal.end_if();
 165     sync_kit(ideal);
 166     Node* ld = _gvn.transform(ideal.value(res));
 167     ld = record_profile_for_speculation_at_array_load(ld);
 168     push_node(bt, ld);
 169     return;
 170   }
 171 
 172   if (elemtype == TypeInt::BOOL) {
 173     bt = T_BOOLEAN;
 174   }
 175   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 176   Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
 177                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
 178   ld = record_profile_for_speculation_at_array_load(ld);
 179   // Loading a non-flattened inline type
 180   if (elemptr != NULL && elemptr->is_inlinetypeptr()) {
 181     assert(!ary_t->is_null_free() || !elemptr->maybe_null(), "inline type array elements should never be null");
 182     ld = InlineTypeNode::make_from_oop(this, ld, elemptr->inline_klass(), !elemptr->maybe_null());
 183   }
 184   push_node(bt, ld);
 185 }
 186 
 187 
 188 //--------------------------------array_store----------------------------------
 189 void Parse::array_store(BasicType bt) {
 190   const Type* elemtype = Type::TOP;
 191   Node* adr = array_addressing(bt, type2size[bt], elemtype);
 192   if (stopped())  return;     // guaranteed null or range check
 193   Node* cast_val = NULL;
 194   if (bt == T_OBJECT) {
 195     cast_val = array_store_check(adr, elemtype);
 196     if (stopped()) return;
 197   }
 198   Node* val = pop_node(bt); // Value to store
 199   Node* idx = pop();        // Index in the array
 200   Node* ary = pop();        // The array itself
 201 
 202   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
 203   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 204 
 205   if (elemtype == TypeInt::BOOL) {
 206     bt = T_BOOLEAN;
 207   } else if (bt == T_OBJECT) {
 208     elemtype = elemtype->make_oopptr();
 209     const Type* tval = _gvn.type(cast_val);
 210     // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
 211     // This is only legal for non-null stores because the array_store_check always passes for null, even
 212     // if the array is null-free. Null stores are handled in GraphKit::gen_inline_array_null_guard().
 213     bool not_null_free = !tval->maybe_null() && !tval->is_oopptr()->can_be_inline_type();
 214     bool not_flattened = not_null_free || (tval->is_inlinetypeptr() && !tval->inline_klass()->flatten_array());
 215     if (!ary_t->is_not_null_free() && not_null_free) {
 216       // Storing a non-inline type, mark array as not null-free (-> not flat).
 217       ary_t = ary_t->cast_to_not_null_free();
 218       Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
 219       replace_in_map(ary, cast);
 220       ary = cast;
 221     } else if (!ary_t->is_not_flat() && not_flattened) {
 222       // Storing a non-flattened value, mark array as not flat.
 223       ary_t = ary_t->cast_to_not_flat();
 224       Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
 225       replace_in_map(ary, cast);
 226       ary = cast;
 227     }
 228 
 229     if (ary_t->is_flat()) {
 230       // Store to flattened inline type array
 231       assert(!tval->maybe_null(), "should be guaranteed by array store check");
 232       // Re-execute flattened array store if buffering triggers deoptimization
 233       PreserveReexecuteState preexecs(this);
 234       inc_sp(3);
 235       jvms()->set_should_reexecute(true);
 236       cast_val->as_InlineType()->store_flattened(this, ary, adr, NULL, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 237       return;
 238     } else if (ary_t->is_null_free()) {
 239       // Store to non-flattened inline type array (elements can never be null)
 240       assert(!tval->maybe_null(), "should be guaranteed by array store check");
 241       if (elemtype->inline_klass()->is_empty()) {
 242         // Ignore empty inline stores, array is already initialized.
 243         return;
 244       }
 245     } else if (!ary_t->is_not_flat() && (tval != TypePtr::NULL_PTR || StressReflectiveCode)) {
 246       // Array might be flattened, emit runtime checks (for NULL, a simple inline_array_null_guard is sufficient).
 247       assert(UseFlatArray && !not_flattened && elemtype->is_oopptr()->can_be_inline_type() &&
 248              !ary_t->klass_is_exact() && !ary_t->is_not_null_free(), "array can't be flattened");
 249       IdealKit ideal(this);
 250       ideal.if_then(flat_array_test(ary, /* flat = */ false)); {
 251         // non-flattened
 252         assert(ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
 253         sync_kit(ideal);
 254         Node* cast_ary = inline_array_null_guard(ary, cast_val, 3);
 255         inc_sp(3);
 256         access_store_at(cast_ary, adr, adr_type, cast_val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
 257         dec_sp(3);
 258         ideal.sync_kit(this);
 259       } ideal.else_(); {
 260         sync_kit(ideal);
 261         // flattened
 262         Node* null_ctl = top();
 263         Node* val = null_check_oop(cast_val, &null_ctl);
 264         if (null_ctl != top()) {
 265           PreserveJVMState pjvms(this);
 266           inc_sp(3);
 267           set_control(null_ctl);
 268           uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
 269           dec_sp(3);
 270         }
 271         // Try to determine the inline klass
 272         ciInlineKlass* vk = NULL;
 273         if (tval->is_inlinetypeptr()) {
 274           vk = tval->inline_klass();
 275         } else if (elemtype->is_inlinetypeptr()) {
 276           vk = elemtype->inline_klass();
 277         }
 278         Node* casted_ary = ary;
 279         if (vk != NULL && !stopped()) {
 280           // Element type is known, cast and store to flattened representation
 281           assert(vk->flatten_array() && elemtype->maybe_null(), "never/always flat - should be optimized");
 282           ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* null_free */ true);
 283           const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 284           casted_ary = _gvn.transform(new CheckCastPPNode(control(), casted_ary, arytype));
 285           Node* casted_adr = array_element_address(casted_ary, idx, T_OBJECT, arytype->size(), control());
 286           if (!val->is_InlineType()) {
 287             assert(!gvn().type(val)->maybe_null(), "inline type array elements should never be null");
 288             val = InlineTypeNode::make_from_oop(this, val, vk);
 289           }
 290           // Re-execute flattened array store if buffering triggers deoptimization
 291           PreserveReexecuteState preexecs(this);
 292           inc_sp(3);
 293           jvms()->set_should_reexecute(true);
 294           val->as_InlineType()->store_flattened(this, casted_ary, casted_adr, NULL, 0, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 295         } else if (!stopped()) {
 296           // Element type is unknown, emit runtime call
 297 
 298           // Below membars keep this access to an unknown flattened array correctly
 299           // ordered with other unknown and known flattened array accesses.
 300           insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 301 
 302           make_runtime_call(RC_LEAF,
 303                             OptoRuntime::store_unknown_inline_type(),
 304                             CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_inline),
 305                             "store_unknown_inline", TypeRawPtr::BOTTOM,
 306                             val, casted_ary, idx);
 307 
 308           insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
 309         }
 310         ideal.sync_kit(this);
 311       }
 312       ideal.end_if();
 313       sync_kit(ideal);
 314       return;
 315     } else if (!ary_t->is_not_null_free()) {
 316       // Array is not flattened but may be null free
 317       assert(elemtype->is_oopptr()->can_be_inline_type() && !ary_t->klass_is_exact(), "array can't be null-free");
 318       ary = inline_array_null_guard(ary, cast_val, 3, true);
 319     }
 320   }
 321   inc_sp(3);
 322   access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 323   dec_sp(3);
 324 }
 325 
 326 
 327 //------------------------------array_addressing-------------------------------
 328 // Pull array and index from the stack.  Compute pointer-to-element.
 329 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
 330   Node *idx   = peek(0+vals);   // Get from stack without popping
 331   Node *ary   = peek(1+vals);   // in case of exception
 332 
 333   // Null check the array base, with correct stack contents
 334   ary = null_check(ary, T_ARRAY);
 335   // Compile-time detect of null-exception?
 336   if (stopped())  return top();
 337 
 338   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
 339   const TypeInt*    sizetype = arytype->size();
 340   elemtype = arytype->elem();
 341 
 342   if (UseUniqueSubclasses) {
 343     const Type* el = elemtype->make_ptr();
 344     if (el && el->isa_instptr()) {
 345       const TypeInstPtr* toop = el->is_instptr();
 346       if (toop->instance_klass()->unique_concrete_subklass()) {
 347         // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
 348         const Type* subklass = Type::get_const_type(toop->instance_klass());
 349         elemtype = subklass->join_speculative(el);
 350       }
 351     }
 352   }
 353 
 354   // Check for big class initializers with all constant offsets
 355   // feeding into a known-size array.
 356   const TypeInt* idxtype = _gvn.type(idx)->is_int();
 357   // See if the highest idx value is less than the lowest array bound,
 358   // and if the idx value cannot be negative:
 359   bool need_range_check = true;
 360   if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
 361     need_range_check = false;
 362     if (C->log() != NULL)   C->log()->elem("observe that='!need_range_check'");
 363   }
 364 
 365   if (!arytype->is_loaded()) {
 366     // Only fails for some -Xcomp runs
 367     // The class is unloaded.  We have to run this bytecode in the interpreter.
 368     ciKlass* klass = arytype->unloaded_klass();
 369 
 370     uncommon_trap(Deoptimization::Reason_unloaded,
 371                   Deoptimization::Action_reinterpret,
 372                   klass, "!loaded array");
 373     return top();
 374   }
 375 
 376   // Do the range check
 377   if (GenerateRangeChecks && need_range_check) {
 378     Node* tst;
 379     if (sizetype->_hi <= 0) {
 380       // The greatest array bound is negative, so we can conclude that we're
 381       // compiling unreachable code, but the unsigned compare trick used below
 382       // only works with non-negative lengths.  Instead, hack "tst" to be zero so
 383       // the uncommon_trap path will always be taken.
 384       tst = _gvn.intcon(0);
 385     } else {
 386       // Range is constant in array-oop, so we can use the original state of mem
 387       Node* len = load_array_length(ary);
 388 
 389       // Test length vs index (standard trick using unsigned compare)
 390       Node* chk = _gvn.transform( new CmpUNode(idx, len) );
 391       BoolTest::mask btest = BoolTest::lt;
 392       tst = _gvn.transform( new BoolNode(chk, btest) );
 393     }
 394     RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 395     _gvn.set_type(rc, rc->Value(&_gvn));
 396     if (!tst->is_Con()) {
 397       record_for_igvn(rc);
 398     }
 399     set_control(_gvn.transform(new IfTrueNode(rc)));
 400     // Branch to failure if out of bounds
 401     {
 402       PreserveJVMState pjvms(this);
 403       set_control(_gvn.transform(new IfFalseNode(rc)));
 404       if (C->allow_range_check_smearing()) {
 405         // Do not use builtin_throw, since range checks are sometimes
 406         // made more stringent by an optimistic transformation.
 407         // This creates "tentative" range checks at this point,
 408         // which are not guaranteed to throw exceptions.
 409         // See IfNode::Ideal, is_range_check, adjust_check.
 410         uncommon_trap(Deoptimization::Reason_range_check,
 411                       Deoptimization::Action_make_not_entrant,
 412                       NULL, "range_check");
 413       } else {
 414         // If we have already recompiled with the range-check-widening
 415         // heroic optimization turned off, then we must really be throwing
 416         // range check exceptions.
 417         builtin_throw(Deoptimization::Reason_range_check);
 418       }
 419     }
 420   }
 421   // Check for always knowing you are throwing a range-check exception
 422   if (stopped())  return top();
 423 
 424   // This could be an access to an inline type array. We can't tell if it's
 425   // flat or not. Knowing the exact type avoids runtime checks and leads to
 426   // a much simpler graph shape. Check profile information.
 427   if (!arytype->is_flat() && !arytype->is_not_flat()) {
 428     // First check the speculative type
 429     Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
 430     ciKlass* array_type = arytype->speculative_type();
 431     if (too_many_traps_or_recompiles(reason) || array_type == NULL) {
 432       // No speculative type, check profile data at this bci
 433       array_type = NULL;
 434       reason = Deoptimization::Reason_class_check;
 435       if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
 436         ciKlass* element_type = NULL;
 437         ProfilePtrKind element_ptr = ProfileMaybeNull;
 438         bool flat_array = true;
 439         bool null_free_array = true;
 440         method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 441       }
 442     }
 443     if (array_type != NULL) {
 444       // Speculate that this array has the exact type reported by profile data
 445       Node* better_ary = NULL;
 446       DEBUG_ONLY(Node* old_control = control();)
 447       Node* slow_ctl = type_check_receiver(ary, array_type, 1.0, &better_ary);
 448       if (stopped()) {
 449         // The check always fails and therefore profile information is incorrect. Don't use it.
 450         assert(old_control == slow_ctl, "type check should have been removed");
 451         set_control(slow_ctl);
 452       } else if (!slow_ctl->is_top()) {
 453         { PreserveJVMState pjvms(this);
 454           set_control(slow_ctl);
 455           uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 456         }
 457         replace_in_map(ary, better_ary);
 458         ary = better_ary;
 459         arytype  = _gvn.type(ary)->is_aryptr();
 460         elemtype = arytype->elem();
 461       }
 462     }
 463   } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
 464     // No need to speculate: feed profile data at this bci for the
 465     // array to type speculation
 466     ciKlass* array_type = NULL;
 467     ciKlass* element_type = NULL;
 468     ProfilePtrKind element_ptr = ProfileMaybeNull;
 469     bool flat_array = true;
 470     bool null_free_array = true;
 471     method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 472     if (array_type != NULL) {
 473       ary = record_profile_for_speculation(ary, array_type, ProfileMaybeNull);
 474     }
 475   }
 476 
 477   // We have no exact array type from profile data. Check profile data
 478   // for a non null-free or non flat array. Non null-free implies non
 479   // flat so check this one first. Speculating on a non null-free
 480   // array doesn't help aaload but could be profitable for a
 481   // subsequent aastore.
 482   if (!arytype->is_null_free() && !arytype->is_not_null_free()) {
 483     bool null_free_array = true;
 484     Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
 485     if (arytype->speculative() != NULL &&
 486         arytype->speculative()->is_aryptr()->is_not_null_free() &&
 487         !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 488       null_free_array = false;
 489       reason = Deoptimization::Reason_speculate_class_check;
 490     } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
 491       ciKlass* array_type = NULL;
 492       ciKlass* element_type = NULL;
 493       ProfilePtrKind element_ptr = ProfileMaybeNull;
 494       bool flat_array = true;
 495       method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 496       reason = Deoptimization::Reason_class_check;
 497     }
 498     if (!null_free_array) {
 499       { // Deoptimize if null-free array
 500         BuildCutout unless(this, null_free_array_test(load_object_klass(ary), /* null_free = */ false), PROB_MAX);
 501         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 502       }
 503       assert(!stopped(), "null-free array should have been caught earlier");
 504       Node* better_ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype->cast_to_not_null_free()));
 505       replace_in_map(ary, better_ary);
 506       ary = better_ary;
 507       arytype = _gvn.type(ary)->is_aryptr();
 508     }
 509   }
 510 
 511   if (!arytype->is_flat() && !arytype->is_not_flat()) {
 512     bool flat_array = true;
 513     Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
 514     if (arytype->speculative() != NULL &&
 515         arytype->speculative()->is_aryptr()->is_not_flat() &&
 516         !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 517       flat_array = false;
 518       reason = Deoptimization::Reason_speculate_class_check;
 519     } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
 520       ciKlass* array_type = NULL;
 521       ciKlass* element_type = NULL;
 522       ProfilePtrKind element_ptr = ProfileMaybeNull;
 523       bool null_free_array = true;
 524       method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
 525       reason = Deoptimization::Reason_class_check;
 526     }
 527     if (!flat_array) {
 528       { // Deoptimize if flat array
 529         BuildCutout unless(this, flat_array_test(ary, /* flat = */ false), PROB_MAX);
 530         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
 531       }
 532       assert(!stopped(), "flat array should have been caught earlier");
 533       Node* better_ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype->cast_to_not_flat()));
 534       replace_in_map(ary, better_ary);
 535       ary = better_ary;
 536       arytype = _gvn.type(ary)->is_aryptr();
 537     }
 538   }
 539 
 540   // Make array address computation control dependent to prevent it
 541   // from floating above the range check during loop optimizations.
 542   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 543   assert(ptr != top(), "top should go hand-in-hand with stopped");
 544 
 545   return ptr;
 546 }
 547 
 548 
 549 // returns IfNode
 550 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 551   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 552   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 553   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 554   return iff;
 555 }
 556 
 557 
 558 // sentinel value for the target bci to mark never taken branches
 559 // (according to profiling)
 560 static const int never_reached = INT_MAX;
 561 
 562 //------------------------------helper for tableswitch-------------------------
 563 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
 564   // True branch, use existing map info
 565   { PreserveJVMState pjvms(this);
 566     Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
 567     set_control( iftrue );
 568     if (unc) {
 569       repush_if_args();
 570       uncommon_trap(Deoptimization::Reason_unstable_if,
 571                     Deoptimization::Action_reinterpret,
 572                     NULL,
 573                     "taken always");
 574     } else {
 575       assert(dest_bci_if_true != never_reached, "inconsistent dest");
 576       merge_new_path(dest_bci_if_true);
 577     }
 578   }
 579 
 580   // False branch
 581   Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
 582   set_control( iffalse );
 583 }
 584 
 585 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
 586   // True branch, use existing map info
 587   { PreserveJVMState pjvms(this);
 588     Node *iffalse  = _gvn.transform( new IfFalseNode (iff) );
 589     set_control( iffalse );
 590     if (unc) {
 591       repush_if_args();
 592       uncommon_trap(Deoptimization::Reason_unstable_if,
 593                     Deoptimization::Action_reinterpret,
 594                     NULL,
 595                     "taken never");
 596     } else {
 597       assert(dest_bci_if_true != never_reached, "inconsistent dest");
 598       merge_new_path(dest_bci_if_true);
 599     }
 600   }
 601 
 602   // False branch
 603   Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
 604   set_control( iftrue );
 605 }
 606 
 607 void Parse::jump_if_always_fork(int dest_bci, bool unc) {
 608   // False branch, use existing map and control()
 609   if (unc) {
 610     repush_if_args();
 611     uncommon_trap(Deoptimization::Reason_unstable_if,
 612                   Deoptimization::Action_reinterpret,
 613                   NULL,
 614                   "taken never");
 615   } else {
 616     assert(dest_bci != never_reached, "inconsistent dest");
 617     merge_new_path(dest_bci);
 618   }
 619 }
 620 
 621 
 622 extern "C" {
 623   static int jint_cmp(const void *i, const void *j) {
 624     int a = *(jint *)i;
 625     int b = *(jint *)j;
 626     return a > b ? 1 : a < b ? -1 : 0;
 627   }
 628 }
 629 
 630 
 631 class SwitchRange : public StackObj {
 632   // a range of integers coupled with a bci destination
 633   jint _lo;                     // inclusive lower limit
 634   jint _hi;                     // inclusive upper limit
 635   int _dest;
 636   float _cnt;                   // how many times this range was hit according to profiling
 637 
 638 public:
 639   jint lo() const              { return _lo;   }
 640   jint hi() const              { return _hi;   }
 641   int  dest() const            { return _dest; }
 642   bool is_singleton() const    { return _lo == _hi; }
 643   float cnt() const            { return _cnt; }
 644 
 645   void setRange(jint lo, jint hi, int dest, float cnt) {
 646     assert(lo <= hi, "must be a non-empty range");
 647     _lo = lo, _hi = hi; _dest = dest; _cnt = cnt;
 648     assert(_cnt >= 0, "");
 649   }
 650   bool adjoinRange(jint lo, jint hi, int dest, float cnt, bool trim_ranges) {
 651     assert(lo <= hi, "must be a non-empty range");
 652     if (lo == _hi+1) {
 653       // see merge_ranges() comment below
 654       if (trim_ranges) {
 655         if (cnt == 0) {
 656           if (_cnt != 0) {
 657             return false;
 658           }
 659           if (dest != _dest) {
 660             _dest = never_reached;
 661           }
 662         } else {
 663           if (_cnt == 0) {
 664             return false;
 665           }
 666           if (dest != _dest) {
 667             return false;
 668           }
 669         }
 670       } else {
 671         if (dest != _dest) {
 672           return false;
 673         }
 674       }
 675       _hi = hi;
 676       _cnt += cnt;
 677       return true;
 678     }
 679     return false;
 680   }
 681 
 682   void set (jint value, int dest, float cnt) {
 683     setRange(value, value, dest, cnt);
 684   }
 685   bool adjoin(jint value, int dest, float cnt, bool trim_ranges) {
 686     return adjoinRange(value, value, dest, cnt, trim_ranges);
 687   }
 688   bool adjoin(SwitchRange& other) {
 689     return adjoinRange(other._lo, other._hi, other._dest, other._cnt, false);
 690   }
 691 
 692   void print() {
 693     if (is_singleton())
 694       tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
 695     else if (lo() == min_jint)
 696       tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
 697     else if (hi() == max_jint)
 698       tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
 699     else
 700       tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
 701   }
 702 };
 703 
 704 // We try to minimize the number of ranges and the size of the taken
 705 // ones using profiling data. When ranges are created,
 706 // SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
 707 // if both were never hit or both were hit to build longer unreached
 708 // ranges. Here, we now merge adjoining ranges with the same
 709 // destination and finally set destination of unreached ranges to the
 710 // special value never_reached because it can help minimize the number
 711 // of tests that are necessary.
 712 //
 713 // For instance:
 714 // [0, 1] to target1 sometimes taken
 715 // [1, 2] to target1 never taken
 716 // [2, 3] to target2 never taken
 717 // would lead to:
 718 // [0, 1] to target1 sometimes taken
 719 // [1, 3] never taken
 720 //
 721 // (first 2 ranges to target1 are not merged)
 722 static void merge_ranges(SwitchRange* ranges, int& rp) {
 723   if (rp == 0) {
 724     return;
 725   }
 726   int shift = 0;
 727   for (int j = 0; j < rp; j++) {
 728     SwitchRange& r1 = ranges[j-shift];
 729     SwitchRange& r2 = ranges[j+1];
 730     if (r1.adjoin(r2)) {
 731       shift++;
 732     } else if (shift > 0) {
 733       ranges[j+1-shift] = r2;
 734     }
 735   }
 736   rp -= shift;
 737   for (int j = 0; j <= rp; j++) {
 738     SwitchRange& r = ranges[j];
 739     if (r.cnt() == 0 && r.dest() != never_reached) {
 740       r.setRange(r.lo(), r.hi(), never_reached, r.cnt());
 741     }
 742   }
 743 }
 744 
 745 //-------------------------------do_tableswitch--------------------------------
 746 void Parse::do_tableswitch() {
 747   // Get information about tableswitch
 748   int default_dest = iter().get_dest_table(0);
 749   jint lo_index    = iter().get_int_table(1);
 750   jint hi_index    = iter().get_int_table(2);
 751   int len          = hi_index - lo_index + 1;
 752 
 753   if (len < 1) {
 754     // If this is a backward branch, add safepoint
 755     maybe_add_safepoint(default_dest);
 756     pop(); // the effect of the instruction execution on the operand stack
 757     merge(default_dest);
 758     return;
 759   }
 760 
 761   ciMethodData* methodData = method()->method_data();
 762   ciMultiBranchData* profile = NULL;
 763   if (methodData->is_mature() && UseSwitchProfiling) {
 764     ciProfileData* data = methodData->bci_to_data(bci());
 765     if (data != NULL && data->is_MultiBranchData()) {
 766       profile = (ciMultiBranchData*)data;
 767     }
 768   }
 769   bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
 770 
 771   // generate decision tree, using trichotomy when possible
 772   int rnum = len+2;
 773   bool makes_backward_branch = false;
 774   SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
 775   int rp = -1;
 776   if (lo_index != min_jint) {
 777     float cnt = 1.0F;
 778     if (profile != NULL) {
 779       cnt = (float)profile->default_count() / (hi_index != max_jint ? 2.0F : 1.0F);
 780     }
 781     ranges[++rp].setRange(min_jint, lo_index-1, default_dest, cnt);
 782   }
 783   for (int j = 0; j < len; j++) {
 784     jint match_int = lo_index+j;
 785     int  dest      = iter().get_dest_table(j+3);
 786     makes_backward_branch |= (dest <= bci());
 787     float cnt = 1.0F;
 788     if (profile != NULL) {
 789       cnt = (float)profile->count_at(j);
 790     }
 791     if (rp < 0 || !ranges[rp].adjoin(match_int, dest, cnt, trim_ranges)) {
 792       ranges[++rp].set(match_int, dest, cnt);
 793     }
 794   }
 795   jint highest = lo_index+(len-1);
 796   assert(ranges[rp].hi() == highest, "");
 797   if (highest != max_jint) {
 798     float cnt = 1.0F;
 799     if (profile != NULL) {
 800       cnt = (float)profile->default_count() / (lo_index != min_jint ? 2.0F : 1.0F);
 801     }
 802     if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, cnt, trim_ranges)) {
 803       ranges[++rp].setRange(highest+1, max_jint, default_dest, cnt);
 804     }
 805   }
 806   assert(rp < len+2, "not too many ranges");
 807 
 808   if (trim_ranges) {
 809     merge_ranges(ranges, rp);
 810   }
 811 
 812   // Safepoint in case if backward branch observed
 813   if (makes_backward_branch) {
 814     add_safepoint();
 815   }
 816 
 817   Node* lookup = pop(); // lookup value
 818   jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
 819 }
 820 
 821 
 822 //------------------------------do_lookupswitch--------------------------------
 823 void Parse::do_lookupswitch() {
 824   // Get information about lookupswitch
 825   int default_dest = iter().get_dest_table(0);
 826   jint len          = iter().get_int_table(1);
 827 
 828   if (len < 1) {    // If this is a backward branch, add safepoint
 829     maybe_add_safepoint(default_dest);
 830     pop(); // the effect of the instruction execution on the operand stack
 831     merge(default_dest);
 832     return;
 833   }
 834 
 835   ciMethodData* methodData = method()->method_data();
 836   ciMultiBranchData* profile = NULL;
 837   if (methodData->is_mature() && UseSwitchProfiling) {
 838     ciProfileData* data = methodData->bci_to_data(bci());
 839     if (data != NULL && data->is_MultiBranchData()) {
 840       profile = (ciMultiBranchData*)data;
 841     }
 842   }
 843   bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
 844 
 845   // generate decision tree, using trichotomy when possible
 846   jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
 847   {
 848     for (int j = 0; j < len; j++) {
 849       table[3*j+0] = iter().get_int_table(2+2*j);
 850       table[3*j+1] = iter().get_dest_table(2+2*j+1);
 851       // Handle overflow when converting from uint to jint
 852       table[3*j+2] = (profile == NULL) ? 1 : (jint)MIN2<uint>((uint)max_jint, profile->count_at(j));
 853     }
 854     qsort(table, len, 3*sizeof(table[0]), jint_cmp);
 855   }
 856 
 857   float default_cnt = 1.0F;
 858   if (profile != NULL) {
 859     juint defaults = max_juint - len;
 860     default_cnt = (float)profile->default_count()/(float)defaults;
 861   }
 862 
 863   int rnum = len*2+1;
 864   bool makes_backward_branch = false;
 865   SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
 866   int rp = -1;
 867   for (int j = 0; j < len; j++) {
 868     jint match_int   = table[3*j+0];
 869     jint  dest        = table[3*j+1];
 870     jint  cnt         = table[3*j+2];
 871     jint  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
 872     makes_backward_branch |= (dest <= bci());
 873     float c = default_cnt * ((float)match_int - (float)next_lo);
 874     if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, c, trim_ranges))) {
 875       assert(default_dest != never_reached, "sentinel value for dead destinations");
 876       ranges[++rp].setRange(next_lo, match_int-1, default_dest, c);
 877     }
 878     if (rp < 0 || !ranges[rp].adjoin(match_int, dest, (float)cnt, trim_ranges)) {
 879       assert(dest != never_reached, "sentinel value for dead destinations");
 880       ranges[++rp].set(match_int, dest,  (float)cnt);
 881     }
 882   }
 883   jint highest = table[3*(len-1)];
 884   assert(ranges[rp].hi() == highest, "");
 885   if (highest != max_jint &&
 886       !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest), trim_ranges)) {
 887     ranges[++rp].setRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest));
 888   }
 889   assert(rp < rnum, "not too many ranges");
 890 
 891   if (trim_ranges) {
 892     merge_ranges(ranges, rp);
 893   }
 894 
 895   // Safepoint in case backward branch observed
 896   if (makes_backward_branch) {
 897     add_safepoint();
 898   }
 899 
 900   Node *lookup = pop(); // lookup value
 901   jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
 902 }
 903 
 904 static float if_prob(float taken_cnt, float total_cnt) {
 905   assert(taken_cnt <= total_cnt, "");
 906   if (total_cnt == 0) {
 907     return PROB_FAIR;
 908   }
 909   float p = taken_cnt / total_cnt;
 910   return clamp(p, PROB_MIN, PROB_MAX);
 911 }
 912 
 913 static float if_cnt(float cnt) {
 914   if (cnt == 0) {
 915     return COUNT_UNKNOWN;
 916   }
 917   return cnt;
 918 }
 919 
 920 static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
 921   float total_cnt = 0;
 922   for (SwitchRange* sr = lo; sr <= hi; sr++) {
 923     total_cnt += sr->cnt();
 924   }
 925   return total_cnt;
 926 }
 927 
 928 class SwitchRanges : public ResourceObj {
 929 public:
 930   SwitchRange* _lo;
 931   SwitchRange* _hi;
 932   SwitchRange* _mid;
 933   float _cost;
 934 
 935   enum {
 936     Start,
 937     LeftDone,
 938     RightDone,
 939     Done
 940   } _state;
 941 
 942   SwitchRanges(SwitchRange *lo, SwitchRange *hi)
 943     : _lo(lo), _hi(hi), _mid(NULL),
 944       _cost(0), _state(Start) {
 945   }
 946 
 947   SwitchRanges()
 948     : _lo(NULL), _hi(NULL), _mid(NULL),
 949       _cost(0), _state(Start) {}
 950 };
 951 
 952 // Estimate cost of performing a binary search on lo..hi
 953 static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
 954   GrowableArray<SwitchRanges> tree;
 955   SwitchRanges root(lo, hi);
 956   tree.push(root);
 957 
 958   float cost = 0;
 959   do {
 960     SwitchRanges& r = *tree.adr_at(tree.length()-1);
 961     if (r._hi != r._lo) {
 962       if (r._mid == NULL) {
 963         float r_cnt = sum_of_cnts(r._lo, r._hi);
 964 
 965         if (r_cnt == 0) {
 966           tree.pop();
 967           cost = 0;
 968           continue;
 969         }
 970 
 971         SwitchRange* mid = NULL;
 972         mid = r._lo;
 973         for (float cnt = 0; ; ) {
 974           assert(mid <= r._hi, "out of bounds");
 975           cnt += mid->cnt();
 976           if (cnt > r_cnt / 2) {
 977             break;
 978           }
 979           mid++;
 980         }
 981         assert(mid <= r._hi, "out of bounds");
 982         r._mid = mid;
 983         r._cost = r_cnt / total_cnt;
 984       }
 985       r._cost += cost;
 986       if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
 987         cost = 0;
 988         r._state = SwitchRanges::LeftDone;
 989         tree.push(SwitchRanges(r._lo, r._mid-1));
 990       } else if (r._state < SwitchRanges::RightDone) {
 991         cost = 0;
 992         r._state = SwitchRanges::RightDone;
 993         tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
 994       } else {
 995         tree.pop();
 996         cost = r._cost;
 997       }
 998     } else {
 999       tree.pop();
1000       cost = r._cost;
1001     }
1002   } while (tree.length() > 0);
1003 
1004 
1005   return cost;
1006 }
1007 
1008 // It sometimes pays off to test most common ranges before the binary search
1009 void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
1010   uint nr = hi - lo + 1;
1011   float total_cnt = sum_of_cnts(lo, hi);
1012 
1013   float min = compute_tree_cost(lo, hi, total_cnt);
1014   float extra = 1;
1015   float sub = 0;
1016 
1017   SwitchRange* array1 = lo;
1018   SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);
1019 
1020   SwitchRange* ranges = NULL;
1021 
1022   while (nr >= 2) {
1023     assert(lo == array1 || lo == array2, "one the 2 already allocated arrays");
1024     ranges = (lo == array1) ? array2 : array1;
1025 
1026     // Find highest frequency range
1027     SwitchRange* candidate = lo;
1028     for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
1029       if (sr->cnt() > candidate->cnt()) {
1030         candidate = sr;
1031       }
1032     }
1033     SwitchRange most_freq = *candidate;
1034     if (most_freq.cnt() == 0) {
1035       break;
1036     }
1037 
1038     // Copy remaining ranges into another array
1039     int shift = 0;
1040     for (uint i = 0; i < nr; i++) {
1041       SwitchRange* sr = &lo[i];
1042       if (sr != candidate) {
1043         ranges[i-shift] = *sr;
1044       } else {
1045         shift++;
1046         if (i > 0 && i < nr-1) {
1047           SwitchRange prev = lo[i-1];
1048           prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.cnt());
1049           if (prev.adjoin(lo[i+1])) {
1050             shift++;
1051             i++;
1052           }
1053           ranges[i-shift] = prev;
1054         }
1055       }
1056     }
1057     nr -= shift;
1058 
1059     // Evaluate cost of testing the most common range and performing a
1060     // binary search on the other ranges
1061     float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
1062     if (cost >= min) {
1063       break;
1064     }
1065     // swap arrays
1066     lo = &ranges[0];
1067     hi = &ranges[nr-1];
1068 
1069     // It pays off: emit the test for the most common range
1070     assert(most_freq.cnt() > 0, "must be taken");
1071     Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
1072     Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(most_freq.hi() - most_freq.lo())));
1073     Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
1074     IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
1075     jump_if_true_fork(iff, most_freq.dest(), false);
1076 
1077     sub += most_freq.cnt() / total_cnt;
1078     extra += 1 - sub;
1079     min = cost;
1080   }
1081 }
1082 
1083 //----------------------------create_jump_tables-------------------------------
1084 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
1085   // Are jumptables enabled
1086   if (!UseJumpTables)  return false;
1087 
1088   // Are jumptables supported
1089   if (!Matcher::has_match_rule(Op_Jump))  return false;
1090 
1091   bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
1092 
1093   // Decide if a guard is needed to lop off big ranges at either (or
1094   // both) end(s) of the input set. We'll call this the default target
1095   // even though we can't be sure that it is the true "default".
1096 
1097   bool needs_guard = false;
1098   int default_dest;
1099   int64_t total_outlier_size = 0;
1100   int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
1101   int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;
1102 
1103   if (lo->dest() == hi->dest()) {
1104     total_outlier_size = hi_size + lo_size;
1105     default_dest = lo->dest();
1106   } else if (lo_size > hi_size) {
1107     total_outlier_size = lo_size;
1108     default_dest = lo->dest();
1109   } else {
1110     total_outlier_size = hi_size;
1111     default_dest = hi->dest();
1112   }
1113 
1114   float total = sum_of_cnts(lo, hi);
1115   float cost = compute_tree_cost(lo, hi, total);
1116 
1117   // If a guard test will eliminate very sparse end ranges, then
1118   // it is worth the cost of an extra jump.
1119   float trimmed_cnt = 0;
1120   if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
1121     needs_guard = true;
1122     if (default_dest == lo->dest()) {
1123       trimmed_cnt += lo->cnt();
1124       lo++;
1125     }
1126     if (default_dest == hi->dest()) {
1127       trimmed_cnt += hi->cnt();
1128       hi--;
1129     }
1130   }
1131 
1132   // Find the total number of cases and ranges
1133   int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
1134   int num_range = hi - lo + 1;
1135 
1136   // Don't create table if: too large, too small, or too sparse.
1137   if (num_cases > MaxJumpTableSize)
1138     return false;
1139   if (UseSwitchProfiling) {
1140     // MinJumpTableSize is set so with a well balanced binary tree,
1141     // when the number of ranges is MinJumpTableSize, it's cheaper to
1142     // go through a JumpNode that a tree of IfNodes. Average cost of a
1143     // tree of IfNodes with MinJumpTableSize is
1144     // log2f(MinJumpTableSize) comparisons. So if the cost computed
1145     // from profile data is less than log2f(MinJumpTableSize) then
1146     // going with the binary search is cheaper.
1147     if (cost < log2f(MinJumpTableSize)) {
1148       return false;
1149     }
1150   } else {
1151     if (num_cases < MinJumpTableSize)
1152       return false;
1153   }
1154   if (num_cases > (MaxJumpTableSparseness * num_range))
1155     return false;
1156 
1157   // Normalize table lookups to zero
1158   int lowval = lo->lo();
1159   key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );
1160 
1161   // Generate a guard to protect against input keyvals that aren't
1162   // in the switch domain.
1163   if (needs_guard) {
1164     Node*   size = _gvn.intcon(num_cases);
1165     Node*   cmp = _gvn.transform(new CmpUNode(key_val, size));
1166     Node*   tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
1167     IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
1168     jump_if_true_fork(iff, default_dest, trim_ranges && trimmed_cnt == 0);
1169 
1170     total -= trimmed_cnt;
1171   }
1172 
1173   // Create an ideal node JumpTable that has projections
1174   // of all possible ranges for a switch statement
1175   // The key_val input must be converted to a pointer offset and scaled.
1176   // Compare Parse::array_addressing above.
1177 
1178   // Clean the 32-bit int into a real 64-bit offset.
1179   // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
1180   // Make I2L conversion control dependent to prevent it from
1181   // floating above the range check during loop optimizations.
1182   // Do not use a narrow int type here to prevent the data path from dying
1183   // while the control path is not removed. This can happen if the type of key_val
1184   // is later known to be out of bounds of [0, num_cases] and therefore a narrow cast
1185   // would be replaced by TOP while C2 is not able to fold the corresponding range checks.
1186   // Set _carry_dependency for the cast to avoid being removed by IGVN.
1187 #ifdef _LP64
1188   key_val = C->constrained_convI2L(&_gvn, key_val, TypeInt::INT, control(), true /* carry_dependency */);
1189 #endif
1190 
1191   // Shift the value by wordsize so we have an index into the table, rather
1192   // than a switch value
1193   Node *shiftWord = _gvn.MakeConX(wordSize);
1194   key_val = _gvn.transform( new MulXNode( key_val, shiftWord));
1195 
1196   // Create the JumpNode
1197   Arena* arena = C->comp_arena();
1198   float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
1199   int i = 0;
1200   if (total == 0) {
1201     for (SwitchRange* r = lo; r <= hi; r++) {
1202       for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1203         probs[i] = 1.0F / num_cases;
1204       }
1205     }
1206   } else {
1207     for (SwitchRange* r = lo; r <= hi; r++) {
1208       float prob = r->cnt()/total;
1209       for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1210         probs[i] = prob / (r->hi() - r->lo() + 1);
1211       }
1212     }
1213   }
1214 
1215   ciMethodData* methodData = method()->method_data();
1216   ciMultiBranchData* profile = NULL;
1217   if (methodData->is_mature()) {
1218     ciProfileData* data = methodData->bci_to_data(bci());
1219     if (data != NULL && data->is_MultiBranchData()) {
1220       profile = (ciMultiBranchData*)data;
1221     }
1222   }
1223 
1224   Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total));
1225 
1226   // These are the switch destinations hanging off the jumpnode
1227   i = 0;
1228   for (SwitchRange* r = lo; r <= hi; r++) {
1229     for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1230       Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
1231       {
1232         PreserveJVMState pjvms(this);
1233         set_control(input);
1234         jump_if_always_fork(r->dest(), trim_ranges && r->cnt() == 0);
1235       }
1236     }
1237   }
1238   assert(i == num_cases, "miscount of cases");
1239   stop_and_kill_map();  // no more uses for this JVMS
1240   return true;
1241 }
1242 
1243 //----------------------------jump_switch_ranges-------------------------------
1244 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
1245   Block* switch_block = block();
1246   bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
1247 
1248   if (switch_depth == 0) {
1249     // Do special processing for the top-level call.
1250     assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
1251     assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
1252 
1253     // Decrement pred-numbers for the unique set of nodes.
1254 #ifdef ASSERT
1255     if (!trim_ranges) {
1256       // Ensure that the block's successors are a (duplicate-free) set.
1257       int successors_counted = 0;  // block occurrences in [hi..lo]
1258       int unique_successors = switch_block->num_successors();
1259       for (int i = 0; i < unique_successors; i++) {
1260         Block* target = switch_block->successor_at(i);
1261 
1262         // Check that the set of successors is the same in both places.
1263         int successors_found = 0;
1264         for (SwitchRange* p = lo; p <= hi; p++) {
1265           if (p->dest() == target->start())  successors_found++;
1266         }
1267         assert(successors_found > 0, "successor must be known");
1268         successors_counted += successors_found;
1269       }
1270       assert(successors_counted == (hi-lo)+1, "no unexpected successors");
1271     }
1272 #endif
1273 
1274     // Maybe prune the inputs, based on the type of key_val.
1275     jint min_val = min_jint;
1276     jint max_val = max_jint;
1277     const TypeInt* ti = key_val->bottom_type()->isa_int();
1278     if (ti != NULL) {
1279       min_val = ti->_lo;
1280       max_val = ti->_hi;
1281       assert(min_val <= max_val, "invalid int type");
1282     }
1283     while (lo->hi() < min_val) {
1284       lo++;
1285     }
1286     if (lo->lo() < min_val)  {
1287       lo->setRange(min_val, lo->hi(), lo->dest(), lo->cnt());
1288     }
1289     while (hi->lo() > max_val) {
1290       hi--;
1291     }
1292     if (hi->hi() > max_val) {
1293       hi->setRange(hi->lo(), max_val, hi->dest(), hi->cnt());
1294     }
1295 
1296     linear_search_switch_ranges(key_val, lo, hi);
1297   }
1298 
1299 #ifndef PRODUCT
1300   if (switch_depth == 0) {
1301     _max_switch_depth = 0;
1302     _est_switch_depth = log2i_graceful((hi - lo + 1) - 1) + 1;
1303   }
1304 #endif
1305 
1306   assert(lo <= hi, "must be a non-empty set of ranges");
1307   if (lo == hi) {
1308     jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1309   } else {
1310     assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
1311     assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
1312 
1313     if (create_jump_tables(key_val, lo, hi)) return;
1314 
1315     SwitchRange* mid = NULL;
1316     float total_cnt = sum_of_cnts(lo, hi);
1317 
1318     int nr = hi - lo + 1;
1319     if (UseSwitchProfiling) {
1320       // Don't keep the binary search tree balanced: pick up mid point
1321       // that split frequencies in half.
1322       float cnt = 0;
1323       for (SwitchRange* sr = lo; sr <= hi; sr++) {
1324         cnt += sr->cnt();
1325         if (cnt >= total_cnt / 2) {
1326           mid = sr;
1327           break;
1328         }
1329       }
1330     } else {
1331       mid = lo + nr/2;
1332 
1333       // if there is an easy choice, pivot at a singleton:
1334       if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;
1335 
1336       assert(lo < mid && mid <= hi, "good pivot choice");
1337       assert(nr != 2 || mid == hi,   "should pick higher of 2");
1338       assert(nr != 3 || mid == hi-1, "should pick middle of 3");
1339     }
1340 
1341 
1342     Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());
1343 
1344     if (mid->is_singleton()) {
1345       IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
1346       jump_if_false_fork(iff_ne, mid->dest(), trim_ranges && mid->cnt() == 0);
1347 
1348       // Special Case:  If there are exactly three ranges, and the high
1349       // and low range each go to the same place, omit the "gt" test,
1350       // since it will not discriminate anything.
1351       bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;
1352 
1353       // if there is a higher range, test for it and process it:
1354       if (mid < hi && !eq_test_only) {
1355         // two comparisons of same values--should enable 1 test for 2 branches
1356         // Use BoolTest::lt instead of BoolTest::gt
1357         float cnt = sum_of_cnts(lo, mid-1);
1358         IfNode *iff_lt  = jump_if_fork_int(key_val, test_val, BoolTest::lt, if_prob(cnt, total_cnt), if_cnt(cnt));
1359         Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_lt) );
1360         Node   *iffalse = _gvn.transform( new IfFalseNode(iff_lt) );
1361         { PreserveJVMState pjvms(this);
1362           set_control(iffalse);
1363           jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
1364         }
1365         set_control(iftrue);
1366       }
1367 
1368     } else {
1369       // mid is a range, not a singleton, so treat mid..hi as a unit
1370       float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
1371       IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));
1372 
1373       // if there is a higher range, test for it and process it:
1374       if (mid == hi) {
1375         jump_if_true_fork(iff_ge, mid->dest(), trim_ranges && cnt == 0);
1376       } else {
1377         Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
1378         Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
1379         { PreserveJVMState pjvms(this);
1380           set_control(iftrue);
1381           jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1);
1382         }
1383         set_control(iffalse);
1384       }
1385     }
1386 
1387     // in any case, process the lower range
1388     if (mid == lo) {
1389       if (mid->is_singleton()) {
1390         jump_switch_ranges(key_val, lo+1, hi, switch_depth+1);
1391       } else {
1392         jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1393       }
1394     } else {
1395       jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
1396     }
1397   }
1398 
1399   // Decrease pred_count for each successor after all is done.
1400   if (switch_depth == 0) {
1401     int unique_successors = switch_block->num_successors();
1402     for (int i = 0; i < unique_successors; i++) {
1403       Block* target = switch_block->successor_at(i);
1404       // Throw away the pre-allocated path for each unique successor.
1405       target->next_path_num();
1406     }
1407   }
1408 
1409 #ifndef PRODUCT
1410   _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
1411   if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
1412     SwitchRange* r;
1413     int nsing = 0;
1414     for( r = lo; r <= hi; r++ ) {
1415       if( r->is_singleton() )  nsing++;
1416     }
1417     tty->print(">>> ");
1418     _method->print_short_name();
1419     tty->print_cr(" switch decision tree");
1420     tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
1421                   (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
1422     if (_max_switch_depth > _est_switch_depth) {
1423       tty->print_cr("******** BAD SWITCH DEPTH ********");
1424     }
1425     tty->print("   ");
1426     for( r = lo; r <= hi; r++ ) {
1427       r->print();
1428     }
1429     tty->cr();
1430   }
1431 #endif
1432 }
1433 
1434 void Parse::modf() {
1435   Node *f2 = pop();
1436   Node *f1 = pop();
1437   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
1438                               CAST_FROM_FN_PTR(address, SharedRuntime::frem),
1439                               "frem", NULL, //no memory effects
1440                               f1, f2);
1441   Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
1442 
1443   push(res);
1444 }
1445 
1446 void Parse::modd() {
1447   Node *d2 = pop_pair();
1448   Node *d1 = pop_pair();
1449   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
1450                               CAST_FROM_FN_PTR(address, SharedRuntime::drem),
1451                               "drem", NULL, //no memory effects
1452                               d1, top(), d2, top());
1453   Node* res_d   = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
1454 
1455 #ifdef ASSERT
1456   Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1));
1457   assert(res_top == top(), "second value must be top");
1458 #endif
1459 
1460   push_pair(res_d);
1461 }
1462 
1463 void Parse::l2f() {
1464   Node* f2 = pop();
1465   Node* f1 = pop();
1466   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
1467                               CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
1468                               "l2f", NULL, //no memory effects
1469                               f1, f2);
1470   Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
1471 
1472   push(res);
1473 }
1474 
1475 // Handle jsr and jsr_w bytecode
1476 void Parse::do_jsr() {
1477   assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
1478 
1479   // Store information about current state, tagged with new _jsr_bci
1480   int return_bci = iter().next_bci();
1481   int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
1482 
1483   // The way we do things now, there is only one successor block
1484   // for the jsr, because the target code is cloned by ciTypeFlow.
1485   Block* target = successor_for_bci(jsr_bci);
1486 
1487   // What got pushed?
1488   const Type* ret_addr = target->peek();
1489   assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
1490 
1491   // Effect on jsr on stack
1492   push(_gvn.makecon(ret_addr));
1493 
1494   // Flow to the jsr.
1495   merge(jsr_bci);
1496 }
1497 
1498 // Handle ret bytecode
1499 void Parse::do_ret() {
1500   // Find to whom we return.
1501   assert(block()->num_successors() == 1, "a ret can only go one place now");
1502   Block* target = block()->successor_at(0);
1503   assert(!target->is_ready(), "our arrival must be expected");
1504   int pnum = target->next_path_num();
1505   merge_common(target, pnum);
1506 }
1507 
1508 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
1509   if (btest != BoolTest::eq && btest != BoolTest::ne) {
1510     // Only ::eq and ::ne are supported for profile injection.
1511     return false;
1512   }
1513   if (test->is_Cmp() &&
1514       test->in(1)->Opcode() == Op_ProfileBoolean) {
1515     ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
1516     int false_cnt = profile->false_count();
1517     int  true_cnt = profile->true_count();
1518 
1519     // Counts matching depends on the actual test operation (::eq or ::ne).
1520     // No need to scale the counts because profile injection was designed
1521     // to feed exact counts into VM.
1522     taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
1523     not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;
1524 
1525     profile->consume();
1526     return true;
1527   }
1528   return false;
1529 }
1530 //--------------------------dynamic_branch_prediction--------------------------
1531 // Try to gather dynamic branch prediction behavior.  Return a probability
1532 // of the branch being taken and set the "cnt" field.  Returns a -1.0
1533 // if we need to use static prediction for some reason.
1534 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
1535   ResourceMark rm;
1536 
1537   cnt  = COUNT_UNKNOWN;
1538 
1539   int     taken = 0;
1540   int not_taken = 0;
1541 
1542   bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
1543 
1544   if (use_mdo) {
1545     // Use MethodData information if it is available
1546     // FIXME: free the ProfileData structure
1547     ciMethodData* methodData = method()->method_data();
1548     if (!methodData->is_mature())  return PROB_UNKNOWN;
1549     ciProfileData* data = methodData->bci_to_data(bci());
1550     if (data == NULL) {
1551       return PROB_UNKNOWN;
1552     }
1553     if (!data->is_JumpData())  return PROB_UNKNOWN;
1554 
1555     // get taken and not taken values
1556     taken = data->as_JumpData()->taken();
1557     not_taken = 0;
1558     if (data->is_BranchData()) {
1559       not_taken = data->as_BranchData()->not_taken();
1560     }
1561 
1562     // scale the counts to be commensurate with invocation counts:
1563     taken = method()->scale_count(taken);
1564     not_taken = method()->scale_count(not_taken);
1565   }
1566 
1567   // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
1568   // We also check that individual counters are positive first, otherwise the sum can become positive.
1569   if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
1570     if (C->log() != NULL) {
1571       C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
1572     }
1573     return PROB_UNKNOWN;
1574   }
1575 
1576   // Compute frequency that we arrive here
1577   float sum = taken + not_taken;
1578   // Adjust, if this block is a cloned private block but the
1579   // Jump counts are shared.  Taken the private counts for
1580   // just this path instead of the shared counts.
1581   if( block()->count() > 0 )
1582     sum = block()->count();
1583   cnt = sum / FreqCountInvocations;
1584 
1585   // Pin probability to sane limits
1586   float prob;
1587   if( !taken )
1588     prob = (0+PROB_MIN) / 2;
1589   else if( !not_taken )
1590     prob = (1+PROB_MAX) / 2;
1591   else {                         // Compute probability of true path
1592     prob = (float)taken / (float)(taken + not_taken);
1593     if (prob > PROB_MAX)  prob = PROB_MAX;
1594     if (prob < PROB_MIN)   prob = PROB_MIN;
1595   }
1596 
1597   assert((cnt > 0.0f) && (prob > 0.0f),
1598          "Bad frequency assignment in if");
1599 
1600   if (C->log() != NULL) {
1601     const char* prob_str = NULL;
1602     if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
1603     if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
1604     char prob_str_buf[30];
1605     if (prob_str == NULL) {
1606       jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob);
1607       prob_str = prob_str_buf;
1608     }
1609     C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
1610                    iter().get_dest(), taken, not_taken, cnt, prob_str);
1611   }
1612   return prob;
1613 }
1614 
1615 //-----------------------------branch_prediction-------------------------------
1616 float Parse::branch_prediction(float& cnt,
1617                                BoolTest::mask btest,
1618                                int target_bci,
1619                                Node* test) {
1620   float prob = dynamic_branch_prediction(cnt, btest, test);
1621   // If prob is unknown, switch to static prediction
1622   if (prob != PROB_UNKNOWN)  return prob;
1623 
1624   prob = PROB_FAIR;                   // Set default value
1625   if (btest == BoolTest::eq)          // Exactly equal test?
1626     prob = PROB_STATIC_INFREQUENT;    // Assume its relatively infrequent
1627   else if (btest == BoolTest::ne)
1628     prob = PROB_STATIC_FREQUENT;      // Assume its relatively frequent
1629 
1630   // If this is a conditional test guarding a backwards branch,
1631   // assume its a loop-back edge.  Make it a likely taken branch.
1632   if (target_bci < bci()) {
1633     if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
1634       // Since it's an OSR, we probably have profile data, but since
1635       // branch_prediction returned PROB_UNKNOWN, the counts are too small.
1636       // Let's make a special check here for completely zero counts.
1637       ciMethodData* methodData = method()->method_data();
1638       if (!methodData->is_empty()) {
1639         ciProfileData* data = methodData->bci_to_data(bci());
1640         // Only stop for truly zero counts, which mean an unknown part
1641         // of the OSR-ed method, and we want to deopt to gather more stats.
1642         // If you have ANY counts, then this loop is simply 'cold' relative
1643         // to the OSR loop.
1644         if (data == NULL ||
1645             (data->as_BranchData()->taken() +  data->as_BranchData()->not_taken() == 0)) {
1646           // This is the only way to return PROB_UNKNOWN:
1647           return PROB_UNKNOWN;
1648         }
1649       }
1650     }
1651     prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
1652   }
1653 
1654   assert(prob != PROB_UNKNOWN, "must have some guess at this point");
1655   return prob;
1656 }
1657 
1658 // The magic constants are chosen so as to match the output of
1659 // branch_prediction() when the profile reports a zero taken count.
1660 // It is important to distinguish zero counts unambiguously, because
1661 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
1662 // very small but nonzero probabilities, which if confused with zero
1663 // counts would keep the program recompiling indefinitely.
1664 bool Parse::seems_never_taken(float prob) const {
1665   return prob < PROB_MIN;
1666 }
1667 
1668 // True if the comparison seems to be the kind that will not change its
1669 // statistics from true to false.  See comments in adjust_map_after_if.
1670 // This question is only asked along paths which are already
1671 // classified as untaken (by seems_never_taken), so really,
1672 // if a path is never taken, its controlling comparison is
1673 // already acting in a stable fashion.  If the comparison
1674 // seems stable, we will put an expensive uncommon trap
1675 // on the untaken path.
1676 bool Parse::seems_stable_comparison() const {
1677   if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
1678     return false;
1679   }
1680   return true;
1681 }
1682 
1683 //-------------------------------repush_if_args--------------------------------
1684 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
1685 inline int Parse::repush_if_args() {
1686   if (PrintOpto && WizardMode) {
1687     tty->print("defending against excessive implicit null exceptions on %s @%d in ",
1688                Bytecodes::name(iter().cur_bc()), iter().cur_bci());
1689     method()->print_name(); tty->cr();
1690   }
1691   int bc_depth = - Bytecodes::depth(iter().cur_bc());
1692   assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
1693   DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
1694   assert(argument(0) != NULL, "must exist");
1695   assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
1696   inc_sp(bc_depth);
1697   return bc_depth;
1698 }
1699 
1700 //----------------------------------do_ifnull----------------------------------
1701 void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
1702   int target_bci = iter().get_dest();
1703 
1704   Block* branch_block = successor_for_bci(target_bci);
1705   Block* next_block   = successor_for_bci(iter().next_bci());
1706 
1707   float cnt;
1708   float prob = branch_prediction(cnt, btest, target_bci, c);
1709   if (prob == PROB_UNKNOWN) {
1710     // (An earlier version of do_ifnull omitted this trap for OSR methods.)
1711     if (PrintOpto && Verbose) {
1712       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1713     }
1714     repush_if_args(); // to gather stats on loop
1715     uncommon_trap(Deoptimization::Reason_unreached,
1716                   Deoptimization::Action_reinterpret,
1717                   NULL, "cold");
1718     if (C->eliminate_boxing()) {
1719       // Mark the successor blocks as parsed
1720       branch_block->next_path_num();
1721       next_block->next_path_num();
1722     }
1723     return;
1724   }
1725 
1726   NOT_PRODUCT(explicit_null_checks_inserted++);
1727 
1728   // Generate real control flow
1729   Node   *tst = _gvn.transform( new BoolNode( c, btest ) );
1730 
1731   // Sanity check the probability value
1732   assert(prob > 0.0f,"Bad probability in Parser");
1733  // Need xform to put node in hash table
1734   IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1735   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1736   // True branch
1737   { PreserveJVMState pjvms(this);
1738     Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
1739     set_control(iftrue);
1740 
1741     if (stopped()) {            // Path is dead?
1742       NOT_PRODUCT(explicit_null_checks_elided++);
1743       if (C->eliminate_boxing()) {
1744         // Mark the successor block as parsed
1745         branch_block->next_path_num();
1746       }
1747     } else {                    // Path is live.
1748       adjust_map_after_if(btest, c, prob, branch_block);
1749       if (!stopped()) {
1750         merge(target_bci);
1751       }
1752     }
1753   }
1754 
1755   // False branch
1756   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1757   set_control(iffalse);
1758 
1759   if (stopped()) {              // Path is dead?
1760     NOT_PRODUCT(explicit_null_checks_elided++);
1761     if (C->eliminate_boxing()) {
1762       // Mark the successor block as parsed
1763       next_block->next_path_num();
1764     }
1765   } else  {                     // Path is live.
1766     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1767   }
1768 }
1769 
1770 //------------------------------------do_if------------------------------------
1771 void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) {
1772   int target_bci = iter().get_dest();
1773 
1774   Block* branch_block = successor_for_bci(target_bci);
1775   Block* next_block   = successor_for_bci(iter().next_bci());
1776 
1777   float cnt;
1778   float prob = branch_prediction(cnt, btest, target_bci, c);
1779   float untaken_prob = 1.0 - prob;
1780 
1781   if (prob == PROB_UNKNOWN) {
1782     if (PrintOpto && Verbose) {
1783       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1784     }
1785     repush_if_args(); // to gather stats on loop
1786     uncommon_trap(Deoptimization::Reason_unreached,
1787                   Deoptimization::Action_reinterpret,
1788                   NULL, "cold");
1789     if (C->eliminate_boxing()) {
1790       // Mark the successor blocks as parsed
1791       branch_block->next_path_num();
1792       next_block->next_path_num();
1793     }
1794     return;
1795   }
1796 
1797   // Sanity check the probability value
1798   assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1799 
1800   bool taken_if_true = true;
1801   // Convert BoolTest to canonical form:
1802   if (!BoolTest(btest).is_canonical()) {
1803     btest         = BoolTest(btest).negate();
1804     taken_if_true = false;
1805     // prob is NOT updated here; it remains the probability of the taken
1806     // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1807   }
1808   assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1809 
1810   Node* tst0 = new BoolNode(c, btest);
1811   Node* tst = _gvn.transform(tst0);
1812   BoolTest::mask taken_btest   = BoolTest::illegal;
1813   BoolTest::mask untaken_btest = BoolTest::illegal;
1814 
1815   if (tst->is_Bool()) {
1816     // Refresh c from the transformed bool node, since it may be
1817     // simpler than the original c.  Also re-canonicalize btest.
1818     // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
1819     // That can arise from statements like: if (x instanceof C) ...
1820     if (tst != tst0) {
1821       // Canonicalize one more time since transform can change it.
1822       btest = tst->as_Bool()->_test._test;
1823       if (!BoolTest(btest).is_canonical()) {
1824         // Reverse edges one more time...
1825         tst   = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1826         btest = tst->as_Bool()->_test._test;
1827         assert(BoolTest(btest).is_canonical(), "sanity");
1828         taken_if_true = !taken_if_true;
1829       }
1830       c = tst->in(1);
1831     }
1832     BoolTest::mask neg_btest = BoolTest(btest).negate();
1833     taken_btest   = taken_if_true ?     btest : neg_btest;
1834     untaken_btest = taken_if_true ? neg_btest :     btest;
1835   }
1836 
1837   // Generate real control flow
1838   float true_prob = (taken_if_true ? prob : untaken_prob);
1839   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1840   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1841   Node* taken_branch   = new IfTrueNode(iff);
1842   Node* untaken_branch = new IfFalseNode(iff);
1843   if (!taken_if_true) {  // Finish conversion to canonical form
1844     Node* tmp      = taken_branch;
1845     taken_branch   = untaken_branch;
1846     untaken_branch = tmp;
1847   }
1848 
1849   // Branch is taken:
1850   { PreserveJVMState pjvms(this);
1851     taken_branch = _gvn.transform(taken_branch);
1852     set_control(taken_branch);
1853 
1854     if (stopped()) {
1855       if (C->eliminate_boxing() && !new_path) {
1856         // Mark the successor block as parsed (if we haven't created a new path)
1857         branch_block->next_path_num();
1858       }
1859     } else {
1860       adjust_map_after_if(taken_btest, c, prob, branch_block);
1861       if (!stopped()) {
1862         if (new_path) {
1863           // Merge by using a new path
1864           merge_new_path(target_bci);
1865         } else if (ctrl_taken != NULL) {
1866           // Don't merge but save taken branch to be wired by caller
1867           *ctrl_taken = control();
1868         } else {
1869           merge(target_bci);
1870         }
1871       }
1872     }
1873   }
1874 
1875   untaken_branch = _gvn.transform(untaken_branch);
1876   set_control(untaken_branch);
1877 
1878   // Branch not taken.
1879   if (stopped() && ctrl_taken == NULL) {
1880     if (C->eliminate_boxing()) {
1881       // Mark the successor block as parsed (if caller does not re-wire control flow)
1882       next_block->next_path_num();
1883     }
1884   } else {
1885     adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
1886   }
1887 }
1888 
1889 
1890 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1891   if (t->speculative() == NULL) {
1892     return ProfileUnknownNull;
1893   }
1894   if (t->speculative_always_null()) {
1895     return ProfileAlwaysNull;
1896   }
1897   if (t->speculative_maybe_null()) {
1898     return ProfileMaybeNull;
1899   }
1900   return ProfileNeverNull;
1901 }
1902 
1903 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1904   inc_sp(2);
1905   Node* cast = null_check_common(input, T_OBJECT, true, NULL,
1906                                  !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1907                                  speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1908   dec_sp(2);
1909   if (btest == BoolTest::ne) {
1910     {
1911       PreserveJVMState pjvms(this);
1912       replace_in_map(input, cast);
1913       int target_bci = iter().get_dest();
1914       merge(target_bci);
1915     }
1916     record_for_igvn(eq_region);
1917     set_control(_gvn.transform(eq_region));
1918   } else {
1919     replace_in_map(input, cast);
1920   }
1921 }
1922 
1923 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
1924   inc_sp(2);
1925   null_ctl = top();
1926   Node* cast = null_check_oop(input, &null_ctl,
1927                               input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
1928                               false,
1929                               speculative_ptr_kind(tinput) == ProfileNeverNull &&
1930                               !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
1931   dec_sp(2);
1932   assert(!stopped(), "null input should have been caught earlier");
1933   return cast;
1934 }
1935 
1936 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
1937   Node* ne_region = new RegionNode(1);
1938   Node* null_ctl;
1939   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
1940   ne_region->add_req(null_ctl);
1941 
1942   Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
1943   {
1944     PreserveJVMState pjvms(this);
1945     inc_sp(2);
1946     set_control(slow_ctl);
1947     Deoptimization::DeoptReason reason;
1948     if (tinput->speculative_type() != NULL && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
1949       reason = Deoptimization::Reason_speculate_class_check;
1950     } else {
1951       reason = Deoptimization::Reason_class_check;
1952     }
1953     uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
1954   }
1955   ne_region->add_req(control());
1956 
1957   record_for_igvn(ne_region);
1958   set_control(_gvn.transform(ne_region));
1959   if (btest == BoolTest::ne) {
1960     {
1961       PreserveJVMState pjvms(this);
1962       if (null_ctl == top()) {
1963         replace_in_map(input, cast);
1964       }
1965       int target_bci = iter().get_dest();
1966       merge(target_bci);
1967     }
1968     record_for_igvn(eq_region);
1969     set_control(_gvn.transform(eq_region));
1970   } else {
1971     if (null_ctl == top()) {
1972       replace_in_map(input, cast);
1973     }
1974     set_control(_gvn.transform(ne_region));
1975   }
1976 }
1977 
1978 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
1979   Node* ne_region = new RegionNode(1);
1980   Node* null_ctl;
1981   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
1982   ne_region->add_req(null_ctl);
1983 
1984   {
1985     BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
1986     inc_sp(2);
1987     uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
1988   }
1989 
1990   ne_region->add_req(control());
1991 
1992   record_for_igvn(ne_region);
1993   set_control(_gvn.transform(ne_region));
1994   if (btest == BoolTest::ne) {
1995     {
1996       PreserveJVMState pjvms(this);
1997       if (null_ctl == top()) {
1998         replace_in_map(input, cast);
1999       }
2000       int target_bci = iter().get_dest();
2001       merge(target_bci);
2002     }
2003     record_for_igvn(eq_region);
2004     set_control(_gvn.transform(eq_region));
2005   } else {
2006     if (null_ctl == top()) {
2007       replace_in_map(input, cast);
2008     }
2009     set_control(_gvn.transform(ne_region));
2010   }
2011 }
2012 
2013 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2014   ciKlass* left_type = NULL;
2015   ciKlass* right_type = NULL;
2016   ProfilePtrKind left_ptr = ProfileUnknownNull;
2017   ProfilePtrKind right_ptr = ProfileUnknownNull;
2018   bool left_inline_type = true;
2019   bool right_inline_type = true;
2020 
2021   // Leverage profiling at acmp
2022   if (UseACmpProfile) {
2023     method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2024     if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2025       left_type = NULL;
2026       right_type = NULL;
2027       left_inline_type = true;
2028       right_inline_type = true;
2029     }
2030     if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2031       left_ptr = ProfileUnknownNull;
2032       right_ptr = ProfileUnknownNull;
2033     }
2034   }
2035 
2036   if (UseTypeSpeculation) {
2037     record_profile_for_speculation(left, left_type, left_ptr);
2038     record_profile_for_speculation(right, right_type, right_ptr);
2039   }
2040 
2041   if (!EnableValhalla) {
2042     Node* cmp = CmpP(left, right);
2043     cmp = optimize_cmp_with_klass(cmp);
2044     do_if(btest, cmp);
2045     return;
2046   }
2047 
2048   // Check for equality before potentially allocating
2049   if (left == right) {
2050     do_if(btest, makecon(TypeInt::CC_EQ));
2051     return;
2052   }
2053 
2054   // Allocate inline type operands and re-execute on deoptimization
2055   if (left->is_InlineType()) {
2056     if (_gvn.type(right)->is_zero_type() ||
2057         (right->is_InlineType() && _gvn.type(right->as_InlineType()->get_is_init())->is_zero_type())) {
2058       // Null checking a scalarized but nullable inline type. Check the IsInit
2059       // input instead of the oop input to avoid keeping buffer allocations alive.
2060       Node* cmp = CmpI(left->as_InlineType()->get_is_init(), intcon(0));
2061       do_if(btest, cmp);
2062       return;
2063     } else {
2064       PreserveReexecuteState preexecs(this);
2065       inc_sp(2);
2066       jvms()->set_should_reexecute(true);
2067       left = left->as_InlineType()->buffer(this)->get_oop();
2068     }
2069   }
2070   if (right->is_InlineType()) {
2071     PreserveReexecuteState preexecs(this);
2072     inc_sp(2);
2073     jvms()->set_should_reexecute(true);
2074     right = right->as_InlineType()->buffer(this)->get_oop();
2075   }
2076 
2077   // First, do a normal pointer comparison
2078   const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2079   const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2080   Node* cmp = CmpP(left, right);
2081   cmp = optimize_cmp_with_klass(cmp);
2082   if (tleft == NULL || !tleft->can_be_inline_type() ||
2083       tright == NULL || !tright->can_be_inline_type()) {
2084     // This is sufficient, if one of the operands can't be an inline type
2085     do_if(btest, cmp);
2086     return;
2087   }
2088   Node* eq_region = NULL;
2089   if (btest == BoolTest::eq) {
2090     do_if(btest, cmp, true);
2091     if (stopped()) {
2092       return;
2093     }
2094   } else {
2095     assert(btest == BoolTest::ne, "only eq or ne");
2096     Node* is_not_equal = NULL;
2097     eq_region = new RegionNode(3);
2098     {
2099       PreserveJVMState pjvms(this);
2100       do_if(btest, cmp, false, &is_not_equal);
2101       if (!stopped()) {
2102         eq_region->init_req(1, control());
2103       }
2104     }
2105     if (is_not_equal == NULL || is_not_equal->is_top()) {
2106       record_for_igvn(eq_region);
2107       set_control(_gvn.transform(eq_region));
2108       return;
2109     }
2110     set_control(is_not_equal);
2111   }
2112 
2113   // Prefer speculative types if available
2114   if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2115     if (tleft->speculative_type() != NULL) {
2116       left_type = tleft->speculative_type();
2117     }
2118     if (tright->speculative_type() != NULL) {
2119       right_type = tright->speculative_type();
2120     }
2121   }
2122 
2123   if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2124     ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2125     if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2126       left_ptr = speculative_left_ptr;
2127     } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2128       left_ptr = speculative_left_ptr;
2129     }
2130   }
2131   if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2132     ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2133     if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2134       right_ptr = speculative_right_ptr;
2135     } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2136       right_ptr = speculative_right_ptr;
2137     }
2138   }
2139 
2140   if (left_ptr == ProfileAlwaysNull) {
2141     // Comparison with null. Assert the input is indeed null and we're done.
2142     acmp_always_null_input(left, tleft, btest, eq_region);
2143     return;
2144   }
2145   if (right_ptr == ProfileAlwaysNull) {
2146     // Comparison with null. Assert the input is indeed null and we're done.
2147     acmp_always_null_input(right, tright, btest, eq_region);
2148     return;
2149   }
2150   if (left_type != NULL && !left_type->is_inlinetype()) {
2151     // Comparison with an object of known type
2152     acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2153     return;
2154   }
2155   if (right_type != NULL && !right_type->is_inlinetype()) {
2156     // Comparison with an object of known type
2157     acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2158     return;
2159   }
2160   if (!left_inline_type) {
2161     // Comparison with an object known not to be an inline type
2162     acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2163     return;
2164   }
2165   if (!right_inline_type) {
2166     // Comparison with an object known not to be an inline type
2167     acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2168     return;
2169   }
2170 
2171   // Pointers are not equal, check if first operand is non-null
2172   Node* ne_region = new RegionNode(6);
2173   Node* null_ctl;
2174   Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2175   ne_region->init_req(1, null_ctl);
2176 
2177   // First operand is non-null, check if it is an inline type
2178   Node* is_value = inline_type_test(not_null_right);
2179   IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2180   Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2181   ne_region->init_req(2, not_value);
2182   set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2183 
2184   // The first operand is an inline type, check if the second operand is non-null
2185   Node* not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2186   ne_region->init_req(3, null_ctl);
2187 
2188   // Check if both operands are of the same class.
2189   Node* kls_left = load_object_klass(not_null_left);
2190   Node* kls_right = load_object_klass(not_null_right);
2191   Node* kls_cmp = CmpP(kls_left, kls_right);
2192   Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2193   IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2194   Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2195   set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2196   ne_region->init_req(4, kls_ne);
2197 
2198   if (stopped()) {
2199     record_for_igvn(ne_region);
2200     set_control(_gvn.transform(ne_region));
2201     if (btest == BoolTest::ne) {
2202       {
2203         PreserveJVMState pjvms(this);
2204         int target_bci = iter().get_dest();
2205         merge(target_bci);
2206       }
2207       record_for_igvn(eq_region);
2208       set_control(_gvn.transform(eq_region));
2209     }
2210     return;
2211   }
2212 
2213   // Both operands are values types of the same class, we need to perform a
2214   // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
2215   Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2216   Node* mem = reset_memory();
2217   Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2218 
2219   Node* eq_io_phi = NULL;
2220   Node* eq_mem_phi = NULL;
2221   if (eq_region != NULL) {
2222     eq_io_phi = PhiNode::make(eq_region, i_o());
2223     eq_mem_phi = PhiNode::make(eq_region, mem);
2224   }
2225 
2226   set_all_memory(mem);
2227 
2228   kill_dead_locals();
2229   ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(ciSymbols::isSubstitutable_name(), ciSymbols::object_object_boolean_signature());
2230   CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2231   call->set_override_symbolic_info(true);
2232   call->init_req(TypeFunc::Parms, not_null_left);
2233   call->init_req(TypeFunc::Parms+1, not_null_right);
2234   inc_sp(2);
2235   set_edges_for_java_call(call, false, false);
2236   Node* ret = set_results_for_java_call(call, false, true);
2237   dec_sp(2);
2238 
2239   // Test the return value of ValueObjectMethods::isSubstitutable()
2240   Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2241   Node* ctl = C->top();
2242   if (btest == BoolTest::eq) {
2243     PreserveJVMState pjvms(this);
2244     do_if(btest, subst_cmp);
2245     if (!stopped()) {
2246       ctl = control();
2247     }
2248   } else {
2249     assert(btest == BoolTest::ne, "only eq or ne");
2250     PreserveJVMState pjvms(this);
2251     do_if(btest, subst_cmp, false, &ctl);
2252     if (!stopped()) {
2253       eq_region->init_req(2, control());
2254       eq_io_phi->init_req(2, i_o());
2255       eq_mem_phi->init_req(2, reset_memory());
2256     }
2257   }
2258   ne_region->init_req(5, ctl);
2259   ne_io_phi->init_req(5, i_o());
2260   ne_mem_phi->init_req(5, reset_memory());
2261 
2262   record_for_igvn(ne_region);
2263   set_control(_gvn.transform(ne_region));
2264   set_i_o(_gvn.transform(ne_io_phi));
2265   set_all_memory(_gvn.transform(ne_mem_phi));
2266 
2267   if (btest == BoolTest::ne) {
2268     {
2269       PreserveJVMState pjvms(this);
2270       int target_bci = iter().get_dest();
2271       merge(target_bci);
2272     }
2273 
2274     record_for_igvn(eq_region);
2275     set_control(_gvn.transform(eq_region));
2276     set_i_o(_gvn.transform(eq_io_phi));
2277     set_all_memory(_gvn.transform(eq_mem_phi));
2278   }
2279 }
2280 
2281 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
2282   // Don't want to speculate on uncommon traps when running with -Xcomp
2283   if (!UseInterpreter) {
2284     return false;
2285   }
2286   return (seems_never_taken(prob) && seems_stable_comparison());
2287 }
2288 
2289 void Parse::maybe_add_predicate_after_if(Block* path) {
2290   if (path->is_SEL_head() && path->preds_parsed() == 0) {
2291     // Add predicates at bci of if dominating the loop so traps can be
2292     // recorded on the if's profile data
2293     int bc_depth = repush_if_args();
2294     add_empty_predicates();
2295     dec_sp(bc_depth);
2296     path->set_has_predicates();
2297   }
2298 }
2299 
2300 
2301 //----------------------------adjust_map_after_if------------------------------
2302 // Adjust the JVM state to reflect the result of taking this path.
2303 // Basically, it means inspecting the CmpNode controlling this
2304 // branch, seeing how it constrains a tested value, and then
2305 // deciding if it's worth our while to encode this constraint
2306 // as graph nodes in the current abstract interpretation map.
2307 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {
2308   if (!c->is_Cmp()) {
2309     maybe_add_predicate_after_if(path);
2310     return;
2311   }
2312 
2313   if (stopped() || btest == BoolTest::illegal) {
2314     return;                             // nothing to do
2315   }
2316 
2317   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2318 
2319   if (path_is_suitable_for_uncommon_trap(prob)) {
2320     repush_if_args();
2321     Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2322                   Deoptimization::Action_reinterpret,
2323                   NULL,
2324                   (is_fallthrough ? "taken always" : "taken never"));
2325 
2326     if (call != nullptr) {
2327       C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2328     }
2329     return;
2330   }
2331 
2332   Node* val = c->in(1);
2333   Node* con = c->in(2);
2334   const Type* tcon = _gvn.type(con);
2335   const Type* tval = _gvn.type(val);
2336   bool have_con = tcon->singleton();
2337   if (tval->singleton()) {
2338     if (!have_con) {
2339       // Swap, so constant is in con.
2340       con  = val;
2341       tcon = tval;
2342       val  = c->in(2);
2343       tval = _gvn.type(val);
2344       btest = BoolTest(btest).commute();
2345       have_con = true;
2346     } else {
2347       // Do we have two constants?  Then leave well enough alone.
2348       have_con = false;
2349     }
2350   }
2351   if (!have_con) {                        // remaining adjustments need a con
2352     maybe_add_predicate_after_if(path);
2353     return;
2354   }
2355 
2356   sharpen_type_after_if(btest, con, tcon, val, tval);
2357   maybe_add_predicate_after_if(path);
2358 }
2359 
2360 
2361 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
2362   Node* ldk;
2363   if (n->is_DecodeNKlass()) {
2364     if (n->in(1)->Opcode() != Op_LoadNKlass) {
2365       return NULL;
2366     } else {
2367       ldk = n->in(1);
2368     }
2369   } else if (n->Opcode() != Op_LoadKlass) {
2370     return NULL;
2371   } else {
2372     ldk = n;
2373   }
2374   assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
2375 
2376   Node* adr = ldk->in(MemNode::Address);
2377   intptr_t off = 0;
2378   Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
2379   if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
2380     return NULL;
2381   const TypePtr* tp = gvn->type(obj)->is_ptr();
2382   if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
2383     return NULL;
2384 
2385   return obj;
2386 }
2387 
2388 void Parse::sharpen_type_after_if(BoolTest::mask btest,
2389                                   Node* con, const Type* tcon,
2390                                   Node* val, const Type* tval) {
2391   // Look for opportunities to sharpen the type of a node
2392   // whose klass is compared with a constant klass.
2393   if (btest == BoolTest::eq && tcon->isa_klassptr()) {
2394     Node* obj = extract_obj_from_klass_load(&_gvn, val);
2395     const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
2396     if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
2397        // Found:
2398        //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
2399        // or the narrowOop equivalent.
2400        const Type* obj_type = _gvn.type(obj);
2401        const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
2402        if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
2403            tboth->higher_equal(obj_type)) {
2404           // obj has to be of the exact type Foo if the CmpP succeeds.
2405           int obj_in_map = map()->find_edge(obj);
2406           JVMState* jvms = this->jvms();
2407           if (obj_in_map >= 0 &&
2408               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2409             TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2410             const Type* tcc = ccast->as_Type()->type();
2411             assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2412             // Delay transform() call to allow recovery of pre-cast value
2413             // at the control merge.
2414             _gvn.set_type_bottom(ccast);
2415             record_for_igvn(ccast);
2416             // Here's the payoff.
2417             replace_in_map(obj, ccast);
2418           }
2419        }
2420     }
2421   }
2422 
2423   int val_in_map = map()->find_edge(val);
2424   if (val_in_map < 0)  return;          // replace_in_map would be useless
2425   {
2426     JVMState* jvms = this->jvms();
2427     if (!(jvms->is_loc(val_in_map) ||
2428           jvms->is_stk(val_in_map)))
2429       return;                           // again, it would be useless
2430   }
2431 
2432   // Check for a comparison to a constant, and "know" that the compared
2433   // value is constrained on this path.
2434   assert(tcon->singleton(), "");
2435   ConstraintCastNode* ccast = NULL;
2436   Node* cast = NULL;
2437 
2438   switch (btest) {
2439   case BoolTest::eq:                    // Constant test?
2440     {
2441       const Type* tboth = tcon->join_speculative(tval);
2442       if (tboth == tval)  break;        // Nothing to gain.
2443       if (tcon->isa_int()) {
2444         ccast = new CastIINode(val, tboth);
2445       } else if (tcon == TypePtr::NULL_PTR) {
2446         // Cast to null, but keep the pointer identity temporarily live.
2447         ccast = new CastPPNode(val, tboth);
2448       } else {
2449         const TypeF* tf = tcon->isa_float_constant();
2450         const TypeD* td = tcon->isa_double_constant();
2451         // Exclude tests vs float/double 0 as these could be
2452         // either +0 or -0.  Just because you are equal to +0
2453         // doesn't mean you ARE +0!
2454         // Note, following code also replaces Long and Oop values.
2455         if ((!tf || tf->_f != 0.0) &&
2456             (!td || td->_d != 0.0))
2457           cast = con;                   // Replace non-constant val by con.
2458       }
2459     }
2460     break;
2461 
2462   case BoolTest::ne:
2463     if (tcon == TypePtr::NULL_PTR) {
2464       cast = cast_not_null(val, false);
2465     }
2466     break;
2467 
2468   default:
2469     // (At this point we could record int range types with CastII.)
2470     break;
2471   }
2472 
2473   if (ccast != NULL) {
2474     const Type* tcc = ccast->as_Type()->type();
2475     assert(tcc != tval && tcc->higher_equal(tval), "must improve");
2476     // Delay transform() call to allow recovery of pre-cast value
2477     // at the control merge.
2478     ccast->set_req(0, control());
2479     _gvn.set_type_bottom(ccast);
2480     record_for_igvn(ccast);
2481     cast = ccast;
2482   }
2483 
2484   if (cast != NULL) {                   // Here's the payoff.
2485     replace_in_map(val, cast);
2486   }
2487 }
2488 
2489 /**
2490  * Use speculative type to optimize CmpP node: if comparison is
2491  * against the low level class, cast the object to the speculative
2492  * type if any. CmpP should then go away.
2493  *
2494  * @param c  expected CmpP node
2495  * @return   result of CmpP on object casted to speculative type
2496  *
2497  */
2498 Node* Parse::optimize_cmp_with_klass(Node* c) {
2499   // If this is transformed by the _gvn to a comparison with the low
2500   // level klass then we may be able to use speculation
2501   if (c->Opcode() == Op_CmpP &&
2502       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2503       c->in(2)->is_Con()) {
2504     Node* load_klass = NULL;
2505     Node* decode = NULL;
2506     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2507       decode = c->in(1);
2508       load_klass = c->in(1)->in(1);
2509     } else {
2510       load_klass = c->in(1);
2511     }
2512     if (load_klass->in(2)->is_AddP()) {
2513       Node* addp = load_klass->in(2);
2514       Node* obj = addp->in(AddPNode::Address);
2515       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2516       if (obj_type->speculative_type_not_null() != NULL) {
2517         ciKlass* k = obj_type->speculative_type();
2518         inc_sp(2);
2519         obj = maybe_cast_profiled_obj(obj, k);
2520         dec_sp(2);
2521         if (obj->is_InlineType()) {
2522           assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2523           obj = obj->as_InlineType()->get_oop();
2524         }
2525         // Make the CmpP use the casted obj
2526         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2527         load_klass = load_klass->clone();
2528         load_klass->set_req(2, addp);
2529         load_klass = _gvn.transform(load_klass);
2530         if (decode != NULL) {
2531           decode = decode->clone();
2532           decode->set_req(1, load_klass);
2533           load_klass = _gvn.transform(decode);
2534         }
2535         c = c->clone();
2536         c->set_req(1, load_klass);
2537         c = _gvn.transform(c);
2538       }
2539     }
2540   }
2541   return c;
2542 }
2543 
2544 //------------------------------do_one_bytecode--------------------------------
2545 // Parse this bytecode, and alter the Parsers JVM->Node mapping
2546 void Parse::do_one_bytecode() {
2547   Node *a, *b, *c, *d;          // Handy temps
2548   BoolTest::mask btest;
2549   int i;
2550 
2551   assert(!has_exceptions(), "bytecode entry state must be clear of throws");
2552 
2553   if (C->check_node_count(NodeLimitFudgeFactor * 5,
2554                           "out of nodes parsing method")) {
2555     return;
2556   }
2557 
2558 #ifdef ASSERT
2559   // for setting breakpoints
2560   if (TraceOptoParse) {
2561     tty->print(" @");
2562     dump_bci(bci());
2563     tty->print(" %s", Bytecodes::name(bc()));
2564     tty->cr();
2565   }
2566 #endif
2567 
2568   switch (bc()) {
2569   case Bytecodes::_nop:
2570     // do nothing
2571     break;
2572   case Bytecodes::_lconst_0:
2573     push_pair(longcon(0));
2574     break;
2575 
2576   case Bytecodes::_lconst_1:
2577     push_pair(longcon(1));
2578     break;
2579 
2580   case Bytecodes::_fconst_0:
2581     push(zerocon(T_FLOAT));
2582     break;
2583 
2584   case Bytecodes::_fconst_1:
2585     push(makecon(TypeF::ONE));
2586     break;
2587 
2588   case Bytecodes::_fconst_2:
2589     push(makecon(TypeF::make(2.0f)));
2590     break;
2591 
2592   case Bytecodes::_dconst_0:
2593     push_pair(zerocon(T_DOUBLE));
2594     break;
2595 
2596   case Bytecodes::_dconst_1:
2597     push_pair(makecon(TypeD::ONE));
2598     break;
2599 
2600   case Bytecodes::_iconst_m1:push(intcon(-1)); break;
2601   case Bytecodes::_iconst_0: push(intcon( 0)); break;
2602   case Bytecodes::_iconst_1: push(intcon( 1)); break;
2603   case Bytecodes::_iconst_2: push(intcon( 2)); break;
2604   case Bytecodes::_iconst_3: push(intcon( 3)); break;
2605   case Bytecodes::_iconst_4: push(intcon( 4)); break;
2606   case Bytecodes::_iconst_5: push(intcon( 5)); break;
2607   case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
2608   case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
2609   case Bytecodes::_aconst_null: push(null());  break;
2610 
2611   case Bytecodes::_ldc:
2612   case Bytecodes::_ldc_w:
2613   case Bytecodes::_ldc2_w: {
2614     ciConstant constant = iter().get_constant();
2615     if (constant.is_loaded()) {
2616       const Type* con_type = Type::make_from_constant(constant);
2617       if (con_type != NULL) {
2618         push_node(con_type->basic_type(), makecon(con_type));
2619       }
2620     } else {
2621       // If the constant is unresolved or in error state, run this BC in the interpreter.
2622       if (iter().is_in_error()) {
2623         uncommon_trap(Deoptimization::make_trap_request(Deoptimization::Reason_unhandled,
2624                                                         Deoptimization::Action_none),
2625                       NULL, "constant in error state", true /* must_throw */);
2626 
2627       } else {
2628         int index = iter().get_constant_pool_index();
2629         uncommon_trap(Deoptimization::make_trap_request(Deoptimization::Reason_unloaded,
2630                                                         Deoptimization::Action_reinterpret,
2631                                                         index),
2632                       NULL, "unresolved constant", false /* must_throw */);
2633       }
2634     }
2635     break;
2636   }
2637 
2638   case Bytecodes::_aload_0:
2639     push( local(0) );
2640     break;
2641   case Bytecodes::_aload_1:
2642     push( local(1) );
2643     break;
2644   case Bytecodes::_aload_2:
2645     push( local(2) );
2646     break;
2647   case Bytecodes::_aload_3:
2648     push( local(3) );
2649     break;
2650   case Bytecodes::_aload:
2651     push( local(iter().get_index()) );
2652     break;
2653 
2654   case Bytecodes::_fload_0:
2655   case Bytecodes::_iload_0:
2656     push( local(0) );
2657     break;
2658   case Bytecodes::_fload_1:
2659   case Bytecodes::_iload_1:
2660     push( local(1) );
2661     break;
2662   case Bytecodes::_fload_2:
2663   case Bytecodes::_iload_2:
2664     push( local(2) );
2665     break;
2666   case Bytecodes::_fload_3:
2667   case Bytecodes::_iload_3:
2668     push( local(3) );
2669     break;
2670   case Bytecodes::_fload:
2671   case Bytecodes::_iload:
2672     push( local(iter().get_index()) );
2673     break;
2674   case Bytecodes::_lload_0:
2675     push_pair_local( 0 );
2676     break;
2677   case Bytecodes::_lload_1:
2678     push_pair_local( 1 );
2679     break;
2680   case Bytecodes::_lload_2:
2681     push_pair_local( 2 );
2682     break;
2683   case Bytecodes::_lload_3:
2684     push_pair_local( 3 );
2685     break;
2686   case Bytecodes::_lload:
2687     push_pair_local( iter().get_index() );
2688     break;
2689 
2690   case Bytecodes::_dload_0:
2691     push_pair_local(0);
2692     break;
2693   case Bytecodes::_dload_1:
2694     push_pair_local(1);
2695     break;
2696   case Bytecodes::_dload_2:
2697     push_pair_local(2);
2698     break;
2699   case Bytecodes::_dload_3:
2700     push_pair_local(3);
2701     break;
2702   case Bytecodes::_dload:
2703     push_pair_local(iter().get_index());
2704     break;
2705   case Bytecodes::_fstore_0:
2706   case Bytecodes::_istore_0:
2707   case Bytecodes::_astore_0:
2708     set_local( 0, pop() );
2709     break;
2710   case Bytecodes::_fstore_1:
2711   case Bytecodes::_istore_1:
2712   case Bytecodes::_astore_1:
2713     set_local( 1, pop() );
2714     break;
2715   case Bytecodes::_fstore_2:
2716   case Bytecodes::_istore_2:
2717   case Bytecodes::_astore_2:
2718     set_local( 2, pop() );
2719     break;
2720   case Bytecodes::_fstore_3:
2721   case Bytecodes::_istore_3:
2722   case Bytecodes::_astore_3:
2723     set_local( 3, pop() );
2724     break;
2725   case Bytecodes::_fstore:
2726   case Bytecodes::_istore:
2727   case Bytecodes::_astore:
2728     set_local( iter().get_index(), pop() );
2729     break;
2730   // long stores
2731   case Bytecodes::_lstore_0:
2732     set_pair_local( 0, pop_pair() );
2733     break;
2734   case Bytecodes::_lstore_1:
2735     set_pair_local( 1, pop_pair() );
2736     break;
2737   case Bytecodes::_lstore_2:
2738     set_pair_local( 2, pop_pair() );
2739     break;
2740   case Bytecodes::_lstore_3:
2741     set_pair_local( 3, pop_pair() );
2742     break;
2743   case Bytecodes::_lstore:
2744     set_pair_local( iter().get_index(), pop_pair() );
2745     break;
2746 
2747   // double stores
2748   case Bytecodes::_dstore_0:
2749     set_pair_local( 0, dprecision_rounding(pop_pair()) );
2750     break;
2751   case Bytecodes::_dstore_1:
2752     set_pair_local( 1, dprecision_rounding(pop_pair()) );
2753     break;
2754   case Bytecodes::_dstore_2:
2755     set_pair_local( 2, dprecision_rounding(pop_pair()) );
2756     break;
2757   case Bytecodes::_dstore_3:
2758     set_pair_local( 3, dprecision_rounding(pop_pair()) );
2759     break;
2760   case Bytecodes::_dstore:
2761     set_pair_local( iter().get_index(), dprecision_rounding(pop_pair()) );
2762     break;
2763 
2764   case Bytecodes::_pop:  dec_sp(1);   break;
2765   case Bytecodes::_pop2: dec_sp(2);   break;
2766   case Bytecodes::_swap:
2767     a = pop();
2768     b = pop();
2769     push(a);
2770     push(b);
2771     break;
2772   case Bytecodes::_dup:
2773     a = pop();
2774     push(a);
2775     push(a);
2776     break;
2777   case Bytecodes::_dup_x1:
2778     a = pop();
2779     b = pop();
2780     push( a );
2781     push( b );
2782     push( a );
2783     break;
2784   case Bytecodes::_dup_x2:
2785     a = pop();
2786     b = pop();
2787     c = pop();
2788     push( a );
2789     push( c );
2790     push( b );
2791     push( a );
2792     break;
2793   case Bytecodes::_dup2:
2794     a = pop();
2795     b = pop();
2796     push( b );
2797     push( a );
2798     push( b );
2799     push( a );
2800     break;
2801 
2802   case Bytecodes::_dup2_x1:
2803     // before: .. c, b, a
2804     // after:  .. b, a, c, b, a
2805     // not tested
2806     a = pop();
2807     b = pop();
2808     c = pop();
2809     push( b );
2810     push( a );
2811     push( c );
2812     push( b );
2813     push( a );
2814     break;
2815   case Bytecodes::_dup2_x2:
2816     // before: .. d, c, b, a
2817     // after:  .. b, a, d, c, b, a
2818     // not tested
2819     a = pop();
2820     b = pop();
2821     c = pop();
2822     d = pop();
2823     push( b );
2824     push( a );
2825     push( d );
2826     push( c );
2827     push( b );
2828     push( a );
2829     break;
2830 
2831   case Bytecodes::_arraylength: {
2832     // Must do null-check with value on expression stack
2833     Node *ary = null_check(peek(), T_ARRAY);
2834     // Compile-time detect of null-exception?
2835     if (stopped())  return;
2836     a = pop();
2837     push(load_array_length(a));
2838     break;
2839   }
2840 
2841   case Bytecodes::_baload:  array_load(T_BYTE);    break;
2842   case Bytecodes::_caload:  array_load(T_CHAR);    break;
2843   case Bytecodes::_iaload:  array_load(T_INT);     break;
2844   case Bytecodes::_saload:  array_load(T_SHORT);   break;
2845   case Bytecodes::_faload:  array_load(T_FLOAT);   break;
2846   case Bytecodes::_aaload:  array_load(T_OBJECT);  break;
2847   case Bytecodes::_laload:  array_load(T_LONG);    break;
2848   case Bytecodes::_daload:  array_load(T_DOUBLE);  break;
2849   case Bytecodes::_bastore: array_store(T_BYTE);   break;
2850   case Bytecodes::_castore: array_store(T_CHAR);   break;
2851   case Bytecodes::_iastore: array_store(T_INT);    break;
2852   case Bytecodes::_sastore: array_store(T_SHORT);  break;
2853   case Bytecodes::_fastore: array_store(T_FLOAT);  break;
2854   case Bytecodes::_aastore: array_store(T_OBJECT); break;
2855   case Bytecodes::_lastore: array_store(T_LONG);   break;
2856   case Bytecodes::_dastore: array_store(T_DOUBLE); break;
2857 
2858   case Bytecodes::_getfield:
2859     do_getfield();
2860     break;
2861 
2862   case Bytecodes::_getstatic:
2863     do_getstatic();
2864     break;
2865 
2866   case Bytecodes::_putfield:
2867     do_putfield();
2868     break;
2869 
2870   case Bytecodes::_putstatic:
2871     do_putstatic();
2872     break;
2873 
2874   case Bytecodes::_irem:
2875     // Must keep both values on the expression-stack during null-check
2876     zero_check_int(peek());
2877     // Compile-time detect of null-exception?
2878     if (stopped())  return;
2879     b = pop();
2880     a = pop();
2881     push(_gvn.transform(new ModINode(control(), a, b)));
2882     break;
2883   case Bytecodes::_idiv:
2884     // Must keep both values on the expression-stack during null-check
2885     zero_check_int(peek());
2886     // Compile-time detect of null-exception?
2887     if (stopped())  return;
2888     b = pop();
2889     a = pop();
2890     push( _gvn.transform( new DivINode(control(),a,b) ) );
2891     break;
2892   case Bytecodes::_imul:
2893     b = pop(); a = pop();
2894     push( _gvn.transform( new MulINode(a,b) ) );
2895     break;
2896   case Bytecodes::_iadd:
2897     b = pop(); a = pop();
2898     push( _gvn.transform( new AddINode(a,b) ) );
2899     break;
2900   case Bytecodes::_ineg:
2901     a = pop();
2902     push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
2903     break;
2904   case Bytecodes::_isub:
2905     b = pop(); a = pop();
2906     push( _gvn.transform( new SubINode(a,b) ) );
2907     break;
2908   case Bytecodes::_iand:
2909     b = pop(); a = pop();
2910     push( _gvn.transform( new AndINode(a,b) ) );
2911     break;
2912   case Bytecodes::_ior:
2913     b = pop(); a = pop();
2914     push( _gvn.transform( new OrINode(a,b) ) );
2915     break;
2916   case Bytecodes::_ixor:
2917     b = pop(); a = pop();
2918     push( _gvn.transform( new XorINode(a,b) ) );
2919     break;
2920   case Bytecodes::_ishl:
2921     b = pop(); a = pop();
2922     push( _gvn.transform( new LShiftINode(a,b) ) );
2923     break;
2924   case Bytecodes::_ishr:
2925     b = pop(); a = pop();
2926     push( _gvn.transform( new RShiftINode(a,b) ) );
2927     break;
2928   case Bytecodes::_iushr:
2929     b = pop(); a = pop();
2930     push( _gvn.transform( new URShiftINode(a,b) ) );
2931     break;
2932 
2933   case Bytecodes::_fneg:
2934     a = pop();
2935     b = _gvn.transform(new NegFNode (a));
2936     push(b);
2937     break;
2938 
2939   case Bytecodes::_fsub:
2940     b = pop();
2941     a = pop();
2942     c = _gvn.transform( new SubFNode(a,b) );
2943     d = precision_rounding(c);
2944     push( d );
2945     break;
2946 
2947   case Bytecodes::_fadd:
2948     b = pop();
2949     a = pop();
2950     c = _gvn.transform( new AddFNode(a,b) );
2951     d = precision_rounding(c);
2952     push( d );
2953     break;
2954 
2955   case Bytecodes::_fmul:
2956     b = pop();
2957     a = pop();
2958     c = _gvn.transform( new MulFNode(a,b) );
2959     d = precision_rounding(c);
2960     push( d );
2961     break;
2962 
2963   case Bytecodes::_fdiv:
2964     b = pop();
2965     a = pop();
2966     c = _gvn.transform( new DivFNode(0,a,b) );
2967     d = precision_rounding(c);
2968     push( d );
2969     break;
2970 
2971   case Bytecodes::_frem:
2972     if (Matcher::has_match_rule(Op_ModF)) {
2973       // Generate a ModF node.
2974       b = pop();
2975       a = pop();
2976       c = _gvn.transform( new ModFNode(0,a,b) );
2977       d = precision_rounding(c);
2978       push( d );
2979     }
2980     else {
2981       // Generate a call.
2982       modf();
2983     }
2984     break;
2985 
2986   case Bytecodes::_fcmpl:
2987     b = pop();
2988     a = pop();
2989     c = _gvn.transform( new CmpF3Node( a, b));
2990     push(c);
2991     break;
2992   case Bytecodes::_fcmpg:
2993     b = pop();
2994     a = pop();
2995 
2996     // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
2997     // which negates the result sign except for unordered.  Flip the unordered
2998     // as well by using CmpF3 which implements unordered-lesser instead of
2999     // unordered-greater semantics.  Finally, commute the result bits.  Result
3000     // is same as using a CmpF3Greater except we did it with CmpF3 alone.
3001     c = _gvn.transform( new CmpF3Node( b, a));
3002     c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3003     push(c);
3004     break;
3005 
3006   case Bytecodes::_f2i:
3007     a = pop();
3008     push(_gvn.transform(new ConvF2INode(a)));
3009     break;
3010 
3011   case Bytecodes::_d2i:
3012     a = pop_pair();
3013     b = _gvn.transform(new ConvD2INode(a));
3014     push( b );
3015     break;
3016 
3017   case Bytecodes::_f2d:
3018     a = pop();
3019     b = _gvn.transform( new ConvF2DNode(a));
3020     push_pair( b );
3021     break;
3022 
3023   case Bytecodes::_d2f:
3024     a = pop_pair();
3025     b = _gvn.transform( new ConvD2FNode(a));
3026     // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
3027     //b = _gvn.transform(new RoundFloatNode(0, b) );
3028     push( b );
3029     break;
3030 
3031   case Bytecodes::_l2f:
3032     if (Matcher::convL2FSupported()) {
3033       a = pop_pair();
3034       b = _gvn.transform( new ConvL2FNode(a));
3035       // For x86_32.ad, FILD doesn't restrict precision to 24 or 53 bits.
3036       // Rather than storing the result into an FP register then pushing
3037       // out to memory to round, the machine instruction that implements
3038       // ConvL2D is responsible for rounding.
3039       // c = precision_rounding(b);
3040       push(b);
3041     } else {
3042       l2f();
3043     }
3044     break;
3045 
3046   case Bytecodes::_l2d:
3047     a = pop_pair();
3048     b = _gvn.transform( new ConvL2DNode(a));
3049     // For x86_32.ad, rounding is always necessary (see _l2f above).
3050     // c = dprecision_rounding(b);
3051     push_pair(b);
3052     break;
3053 
3054   case Bytecodes::_f2l:
3055     a = pop();
3056     b = _gvn.transform( new ConvF2LNode(a));
3057     push_pair(b);
3058     break;
3059 
3060   case Bytecodes::_d2l:
3061     a = pop_pair();
3062     b = _gvn.transform( new ConvD2LNode(a));
3063     push_pair(b);
3064     break;
3065 
3066   case Bytecodes::_dsub:
3067     b = pop_pair();
3068     a = pop_pair();
3069     c = _gvn.transform( new SubDNode(a,b) );
3070     d = dprecision_rounding(c);
3071     push_pair( d );
3072     break;
3073 
3074   case Bytecodes::_dadd:
3075     b = pop_pair();
3076     a = pop_pair();
3077     c = _gvn.transform( new AddDNode(a,b) );
3078     d = dprecision_rounding(c);
3079     push_pair( d );
3080     break;
3081 
3082   case Bytecodes::_dmul:
3083     b = pop_pair();
3084     a = pop_pair();
3085     c = _gvn.transform( new MulDNode(a,b) );
3086     d = dprecision_rounding(c);
3087     push_pair( d );
3088     break;
3089 
3090   case Bytecodes::_ddiv:
3091     b = pop_pair();
3092     a = pop_pair();
3093     c = _gvn.transform( new DivDNode(0,a,b) );
3094     d = dprecision_rounding(c);
3095     push_pair( d );
3096     break;
3097 
3098   case Bytecodes::_dneg:
3099     a = pop_pair();
3100     b = _gvn.transform(new NegDNode (a));
3101     push_pair(b);
3102     break;
3103 
3104   case Bytecodes::_drem:
3105     if (Matcher::has_match_rule(Op_ModD)) {
3106       // Generate a ModD node.
3107       b = pop_pair();
3108       a = pop_pair();
3109       // a % b
3110 
3111       c = _gvn.transform( new ModDNode(0,a,b) );
3112       d = dprecision_rounding(c);
3113       push_pair( d );
3114     }
3115     else {
3116       // Generate a call.
3117       modd();
3118     }
3119     break;
3120 
3121   case Bytecodes::_dcmpl:
3122     b = pop_pair();
3123     a = pop_pair();
3124     c = _gvn.transform( new CmpD3Node( a, b));
3125     push(c);
3126     break;
3127 
3128   case Bytecodes::_dcmpg:
3129     b = pop_pair();
3130     a = pop_pair();
3131     // Same as dcmpl but need to flip the unordered case.
3132     // Commute the inputs, which negates the result sign except for unordered.
3133     // Flip the unordered as well by using CmpD3 which implements
3134     // unordered-lesser instead of unordered-greater semantics.
3135     // Finally, negate the result bits.  Result is same as using a
3136     // CmpD3Greater except we did it with CmpD3 alone.
3137     c = _gvn.transform( new CmpD3Node( b, a));
3138     c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3139     push(c);
3140     break;
3141 
3142 
3143     // Note for longs -> lo word is on TOS, hi word is on TOS - 1
3144   case Bytecodes::_land:
3145     b = pop_pair();
3146     a = pop_pair();
3147     c = _gvn.transform( new AndLNode(a,b) );
3148     push_pair(c);
3149     break;
3150   case Bytecodes::_lor:
3151     b = pop_pair();
3152     a = pop_pair();
3153     c = _gvn.transform( new OrLNode(a,b) );
3154     push_pair(c);
3155     break;
3156   case Bytecodes::_lxor:
3157     b = pop_pair();
3158     a = pop_pair();
3159     c = _gvn.transform( new XorLNode(a,b) );
3160     push_pair(c);
3161     break;
3162 
3163   case Bytecodes::_lshl:
3164     b = pop();                  // the shift count
3165     a = pop_pair();             // value to be shifted
3166     c = _gvn.transform( new LShiftLNode(a,b) );
3167     push_pair(c);
3168     break;
3169   case Bytecodes::_lshr:
3170     b = pop();                  // the shift count
3171     a = pop_pair();             // value to be shifted
3172     c = _gvn.transform( new RShiftLNode(a,b) );
3173     push_pair(c);
3174     break;
3175   case Bytecodes::_lushr:
3176     b = pop();                  // the shift count
3177     a = pop_pair();             // value to be shifted
3178     c = _gvn.transform( new URShiftLNode(a,b) );
3179     push_pair(c);
3180     break;
3181   case Bytecodes::_lmul:
3182     b = pop_pair();
3183     a = pop_pair();
3184     c = _gvn.transform( new MulLNode(a,b) );
3185     push_pair(c);
3186     break;
3187 
3188   case Bytecodes::_lrem:
3189     // Must keep both values on the expression-stack during null-check
3190     assert(peek(0) == top(), "long word order");
3191     zero_check_long(peek(1));
3192     // Compile-time detect of null-exception?
3193     if (stopped())  return;
3194     b = pop_pair();
3195     a = pop_pair();
3196     c = _gvn.transform( new ModLNode(control(),a,b) );
3197     push_pair(c);
3198     break;
3199 
3200   case Bytecodes::_ldiv:
3201     // Must keep both values on the expression-stack during null-check
3202     assert(peek(0) == top(), "long word order");
3203     zero_check_long(peek(1));
3204     // Compile-time detect of null-exception?
3205     if (stopped())  return;
3206     b = pop_pair();
3207     a = pop_pair();
3208     c = _gvn.transform( new DivLNode(control(),a,b) );
3209     push_pair(c);
3210     break;
3211 
3212   case Bytecodes::_ladd:
3213     b = pop_pair();
3214     a = pop_pair();
3215     c = _gvn.transform( new AddLNode(a,b) );
3216     push_pair(c);
3217     break;
3218   case Bytecodes::_lsub:
3219     b = pop_pair();
3220     a = pop_pair();
3221     c = _gvn.transform( new SubLNode(a,b) );
3222     push_pair(c);
3223     break;
3224   case Bytecodes::_lcmp:
3225     // Safepoints are now inserted _before_ branches.  The long-compare
3226     // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
3227     // slew of control flow.  These are usually followed by a CmpI vs zero and
3228     // a branch; this pattern then optimizes to the obvious long-compare and
3229     // branch.  However, if the branch is backwards there's a Safepoint
3230     // inserted.  The inserted Safepoint captures the JVM state at the
3231     // pre-branch point, i.e. it captures the 3-way value.  Thus if a
3232     // long-compare is used to control a loop the debug info will force
3233     // computation of the 3-way value, even though the generated code uses a
3234     // long-compare and branch.  We try to rectify the situation by inserting
3235     // a SafePoint here and have it dominate and kill the safepoint added at a
3236     // following backwards branch.  At this point the JVM state merely holds 2
3237     // longs but not the 3-way value.
3238     switch (iter().next_bc()) {
3239       case Bytecodes::_ifgt:
3240       case Bytecodes::_iflt:
3241       case Bytecodes::_ifge:
3242       case Bytecodes::_ifle:
3243       case Bytecodes::_ifne:
3244       case Bytecodes::_ifeq:
3245         // If this is a backwards branch in the bytecodes, add Safepoint
3246         maybe_add_safepoint(iter().next_get_dest());
3247       default:
3248         break;
3249     }
3250     b = pop_pair();
3251     a = pop_pair();
3252     c = _gvn.transform( new CmpL3Node( a, b ));
3253     push(c);
3254     break;
3255 
3256   case Bytecodes::_lneg:
3257     a = pop_pair();
3258     b = _gvn.transform( new SubLNode(longcon(0),a));
3259     push_pair(b);
3260     break;
3261   case Bytecodes::_l2i:
3262     a = pop_pair();
3263     push( _gvn.transform( new ConvL2INode(a)));
3264     break;
3265   case Bytecodes::_i2l:
3266     a = pop();
3267     b = _gvn.transform( new ConvI2LNode(a));
3268     push_pair(b);
3269     break;
3270   case Bytecodes::_i2b:
3271     // Sign extend
3272     a = pop();
3273     a = Compile::narrow_value(T_BYTE, a, NULL, &_gvn, true);
3274     push(a);
3275     break;
3276   case Bytecodes::_i2s:
3277     a = pop();
3278     a = Compile::narrow_value(T_SHORT, a, NULL, &_gvn, true);
3279     push(a);
3280     break;
3281   case Bytecodes::_i2c:
3282     a = pop();
3283     a = Compile::narrow_value(T_CHAR, a, NULL, &_gvn, true);
3284     push(a);
3285     break;
3286 
3287   case Bytecodes::_i2f:
3288     a = pop();
3289     b = _gvn.transform( new ConvI2FNode(a) ) ;
3290     c = precision_rounding(b);
3291     push (b);
3292     break;
3293 
3294   case Bytecodes::_i2d:
3295     a = pop();
3296     b = _gvn.transform( new ConvI2DNode(a));
3297     push_pair(b);
3298     break;
3299 
3300   case Bytecodes::_iinc:        // Increment local
3301     i = iter().get_index();     // Get local index
3302     set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
3303     break;
3304 
3305   // Exit points of synchronized methods must have an unlock node
3306   case Bytecodes::_return:
3307     return_current(NULL);
3308     break;
3309 
3310   case Bytecodes::_ireturn:
3311   case Bytecodes::_areturn:
3312   case Bytecodes::_freturn:
3313     return_current(pop());
3314     break;
3315   case Bytecodes::_lreturn:
3316     return_current(pop_pair());
3317     break;
3318   case Bytecodes::_dreturn:
3319     return_current(pop_pair());
3320     break;
3321 
3322   case Bytecodes::_athrow:
3323     // null exception oop throws NULL pointer exception
3324     null_check(peek());
3325     if (stopped())  return;
3326     // Hook the thrown exception directly to subsequent handlers.
3327     if (BailoutToInterpreterForThrows) {
3328       // Keep method interpreted from now on.
3329       uncommon_trap(Deoptimization::Reason_unhandled,
3330                     Deoptimization::Action_make_not_compilable);
3331       return;
3332     }
3333     if (env()->jvmti_can_post_on_exceptions()) {
3334       // check if we must post exception events, take uncommon trap if so (with must_throw = false)
3335       uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
3336     }
3337     // Here if either can_post_on_exceptions or should_post_on_exceptions is false
3338     add_exception_state(make_exception_state(peek()));
3339     break;
3340 
3341   case Bytecodes::_goto:   // fall through
3342   case Bytecodes::_goto_w: {
3343     int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
3344 
3345     // If this is a backwards branch in the bytecodes, add Safepoint
3346     maybe_add_safepoint(target_bci);
3347 
3348     // Merge the current control into the target basic block
3349     merge(target_bci);
3350 
3351     // See if we can get some profile data and hand it off to the next block
3352     Block *target_block = block()->successor_for_bci(target_bci);
3353     if (target_block->pred_count() != 1)  break;
3354     ciMethodData* methodData = method()->method_data();
3355     if (!methodData->is_mature())  break;
3356     ciProfileData* data = methodData->bci_to_data(bci());
3357     assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
3358     int taken = ((ciJumpData*)data)->taken();
3359     taken = method()->scale_count(taken);
3360     target_block->set_count(taken);
3361     break;
3362   }
3363 
3364   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
3365   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3366   handle_if_null:
3367     // If this is a backwards branch in the bytecodes, add Safepoint
3368     maybe_add_safepoint(iter().get_dest());
3369     a = null();
3370     b = pop();
3371     if (b->is_InlineType()) {
3372       // Null checking a scalarized but nullable inline type. Check the IsInit
3373       // input instead of the oop input to avoid keeping buffer allocations alive
3374       c = _gvn.transform(new CmpINode(b->as_InlineType()->get_is_init(), zerocon(T_INT)));
3375     } else {
3376       if (!_gvn.type(b)->speculative_maybe_null() &&
3377           !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3378         inc_sp(1);
3379         Node* null_ctl = top();
3380         b = null_check_oop(b, &null_ctl, true, true, true);
3381         assert(null_ctl->is_top(), "no null control here");
3382         dec_sp(1);
3383       } else if (_gvn.type(b)->speculative_always_null() &&
3384                  !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3385         inc_sp(1);
3386         b = null_assert(b);
3387         dec_sp(1);
3388       }
3389       c = _gvn.transform( new CmpPNode(b, a) );
3390     }
3391     do_ifnull(btest, c);
3392     break;
3393 
3394   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3395   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3396   handle_if_acmp:
3397     // If this is a backwards branch in the bytecodes, add Safepoint
3398     maybe_add_safepoint(iter().get_dest());
3399     a = pop();
3400     b = pop();
3401     do_acmp(btest, b, a);
3402     break;
3403 
3404   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3405   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3406   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3407   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3408   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3409   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3410   handle_ifxx:
3411     // If this is a backwards branch in the bytecodes, add Safepoint
3412     maybe_add_safepoint(iter().get_dest());
3413     a = _gvn.intcon(0);
3414     b = pop();
3415     c = _gvn.transform( new CmpINode(b, a) );
3416     do_if(btest, c);
3417     break;
3418 
3419   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3420   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3421   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3422   case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
3423   case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
3424   case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
3425   handle_if_icmp:
3426     // If this is a backwards branch in the bytecodes, add Safepoint
3427     maybe_add_safepoint(iter().get_dest());
3428     a = pop();
3429     b = pop();
3430     c = _gvn.transform( new CmpINode( b, a ) );
3431     do_if(btest, c);
3432     break;
3433 
3434   case Bytecodes::_tableswitch:
3435     do_tableswitch();
3436     break;
3437 
3438   case Bytecodes::_lookupswitch:
3439     do_lookupswitch();
3440     break;
3441 
3442   case Bytecodes::_invokestatic:
3443   case Bytecodes::_invokedynamic:
3444   case Bytecodes::_invokespecial:
3445   case Bytecodes::_invokevirtual:
3446   case Bytecodes::_invokeinterface:
3447     do_call();
3448     break;
3449   case Bytecodes::_checkcast:
3450     do_checkcast();
3451     break;
3452   case Bytecodes::_instanceof:
3453     do_instanceof();
3454     break;
3455   case Bytecodes::_anewarray:
3456     do_newarray();
3457     break;
3458   case Bytecodes::_newarray:
3459     do_newarray((BasicType)iter().get_index());
3460     break;
3461   case Bytecodes::_multianewarray:
3462     do_multianewarray();
3463     break;
3464   case Bytecodes::_new:
3465     do_new();
3466     break;
3467   case Bytecodes::_aconst_init:
3468     do_aconst_init();
3469     break;
3470   case Bytecodes::_withfield:
3471     do_withfield();
3472     break;
3473 
3474   case Bytecodes::_jsr:
3475   case Bytecodes::_jsr_w:
3476     do_jsr();
3477     break;
3478 
3479   case Bytecodes::_ret:
3480     do_ret();
3481     break;
3482 
3483 
3484   case Bytecodes::_monitorenter:
3485     do_monitor_enter();
3486     break;
3487 
3488   case Bytecodes::_monitorexit:
3489     do_monitor_exit();
3490     break;
3491 
3492   case Bytecodes::_breakpoint:
3493     // Breakpoint set concurrently to compile
3494     // %%% use an uncommon trap?
3495     C->record_failure("breakpoint in method");
3496     return;
3497 
3498   default:
3499 #ifndef PRODUCT
3500     map()->dump(99);
3501 #endif
3502     tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
3503     ShouldNotReachHere();
3504   }
3505 
3506 #ifndef PRODUCT
3507   if (C->should_print_igv(1)) {
3508     IdealGraphPrinter* printer = C->igv_printer();
3509     char buffer[256];
3510     jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
3511     bool old = printer->traverse_outs();
3512     printer->set_traverse_outs(true);
3513     printer->print_method(buffer, 4);
3514     printer->set_traverse_outs(old);
3515   }
3516 #endif
3517 }