src/hotspot/share/opto/macro.cpp

  30 #include "opto/addnode.hpp"
  31 #include "opto/arraycopynode.hpp"
  32 #include "opto/callnode.hpp"
  33 #include "opto/castnode.hpp"
  34 #include "opto/cfgnode.hpp"
  35 #include "opto/compile.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/graphKit.hpp"
  38 #include "opto/locknode.hpp"
  39 #include "opto/loopnode.hpp"
  40 #include "opto/macro.hpp"
  41 #include "opto/memnode.hpp"
  42 #include "opto/narrowptrnode.hpp"
  43 #include "opto/node.hpp"
  44 #include "opto/opaquenode.hpp"
  45 #include "opto/phaseX.hpp"
  46 #include "opto/rootnode.hpp"
  47 #include "opto/runtime.hpp"
  48 #include "opto/subnode.hpp"
  49 #include "opto/type.hpp"

  50 #include "runtime/sharedRuntime.hpp"
  51 #include "utilities/macros.hpp"
  52 #if INCLUDE_G1GC
  53 #include "gc/g1/g1ThreadLocalData.hpp"
  54 #endif // INCLUDE_G1GC
  55 #if INCLUDE_SHENANDOAHGC
  56 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  57 #endif
  58 
  59 
  60 //
  61 // Replace any references to "oldref" in inputs to "use" with "newref".
  62 // Returns the number of replacements made.
  63 //
  64 int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  65   int nreplacements = 0;
  66   uint req = use->req();
  67   for (uint j = 0; j < use->len(); j++) {
  68     Node *uin = use->in(j);
  69     if (uin == oldref) {
  70       if (j < req)
  71         use->set_req(j, newref);
  72       else
  73         use->set_prec(j, newref);
  74       nreplacements++;
  75     } else if (j >= req && uin == NULL) {
  76       break;
  77     }
  78   }
  79   return nreplacements;
  80 }
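
For orientation, the loop above relies on the distinction between required inputs (slots 0..req-1) and precedence edges (slots req..len-1, which are NULL-terminated). A minimal standalone sketch of the same replacement logic over a simplified node type (illustrative only, not HotSpot code):

#include <cstddef>
#include <vector>

struct ToyNode {
  std::vector<ToyNode*> edges; // [0, req) required inputs, [req, len) precedence edges
  size_t req;                  // number of required inputs
};

// Replace every edge to 'oldref' with 'newref'; precedence edges are
// NULL-terminated, so the scan can stop at the first empty slot past 'req'.
static int replace_input_sketch(ToyNode& use, ToyNode* oldref, ToyNode* newref) {
  int nreplacements = 0;
  for (size_t j = 0; j < use.edges.size(); j++) {
    if (use.edges[j] == oldref) {
      use.edges[j] = newref;
      nreplacements++;
    } else if (j >= use.req && use.edges[j] == NULL) {
      break;
    }
  }
  return nreplacements;
}
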
  81 
  82 void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) {
  83   // Copy debug information and adjust JVMState information
  84   uint old_dbg_start = oldcall->tf()->domain()->cnt();
  85   uint new_dbg_start = newcall->tf()->domain()->cnt();
  86   int jvms_adj  = new_dbg_start - old_dbg_start;
  87   assert (new_dbg_start == newcall->req(), "argument count mismatch");
  88 
  89   // SafePointScalarObject node could be referenced several times in debug info.
  90   // Use Dict to record cloned nodes.
  91   Dict* sosn_map = new Dict(cmpkey,hashkey);
  92   for (uint i = old_dbg_start; i < oldcall->req(); i++) {
  93     Node* old_in = oldcall->in(i);
  94     // Clone old SafePointScalarObjectNodes, adjusting their field contents.
  95     if (old_in != NULL && old_in->is_SafePointScalarObject()) {
  96       SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
  97       uint old_unique = C->unique();
  98       Node* new_in = old_sosn->clone(sosn_map);
  99       if (old_unique != C->unique()) { // New node?
 100         new_in->set_req(0, C->root()); // reset control edge
 101         new_in = transform_later(new_in); // Register new node.
 102       }
 103       old_in = new_in;
 104     }
 105     newcall->add_req(old_in);


 260           if (call->as_ArrayCopy()->modifies(offset, offset, phase, false)) {
 261             return in;
 262           }
 263         }
 264         mem = in->in(TypeFunc::Memory);
 265       } else if (in->is_MemBar()) {
 266         ArrayCopyNode* ac = NULL;
 267         if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
 268           assert(ac != NULL && ac->is_clonebasic(), "Only basic clone is a non escaping clone");
 269           return ac;
 270         }
 271         mem = in->in(TypeFunc::Memory);
 272       } else {
 273         assert(false, "unexpected projection");
 274       }
 275     } else if (mem->is_Store()) {
 276       const TypePtr* atype = mem->as_Store()->adr_type();
 277       int adr_idx = phase->C->get_alias_index(atype);
 278       if (adr_idx == alias_idx) {
 279         assert(atype->isa_oopptr(), "address type must be oopptr");
 280         int adr_offset = atype->offset();
 281         uint adr_iid = atype->is_oopptr()->instance_id();
 282         // Array elements references have the same alias_idx
 283         // but different offset and different instance_id.
 284         if (adr_offset == offset && adr_iid == alloc->_idx)
 285           return mem;
 286       } else {
 287         assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
 288       }
 289       mem = mem->in(MemNode::Memory);
 290     } else if (mem->is_ClearArray()) {
 291       if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
  292         // Cannot bypass initialization of the instance
  293         // we are looking for.
 294         debug_only(intptr_t offset;)
 295         assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
 296         InitializeNode* init = alloc->as_Allocate()->initialization();
  297         // We are looking for the stored value, return the Initialize node
  298         // or the memory edge from the Allocate node.
 299         if (init != NULL)
 300           return init;


 303       }
 304       // Otherwise skip it (the call updated 'mem' value).
 305     } else if (mem->Opcode() == Op_SCMemProj) {
 306       mem = mem->in(0);
 307       Node* adr = NULL;
 308       if (mem->is_LoadStore()) {
 309         adr = mem->in(MemNode::Address);
 310       } else {
 311         assert(mem->Opcode() == Op_EncodeISOArray ||
 312                mem->Opcode() == Op_StrCompressedCopy, "sanity");
 313         adr = mem->in(3); // Destination array
 314       }
 315       const TypePtr* atype = adr->bottom_type()->is_ptr();
 316       int adr_idx = phase->C->get_alias_index(atype);
 317       if (adr_idx == alias_idx) {
 318         DEBUG_ONLY(mem->dump();)
 319         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 320         return NULL;
 321       }
 322       mem = mem->in(MemNode::Memory);
 323    } else if (mem->Opcode() == Op_StrInflatedCopy) {
 324       Node* adr = mem->in(3); // Destination array
 325       const TypePtr* atype = adr->bottom_type()->is_ptr();
 326       int adr_idx = phase->C->get_alias_index(atype);
 327       if (adr_idx == alias_idx) {
 328         DEBUG_ONLY(mem->dump();)
 329         assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
 330         return NULL;
 331       }
 332       mem = mem->in(MemNode::Memory);
 333     } else {
 334       return mem;
 335     }
 336     assert(mem != orig_mem, "dead memory loop");
 337   }
 338 }
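
As a side note on the Store case above: a store is only accepted as the value being searched for when it addresses the same alias slice, the same allocation instance, and the same field offset. A minimal illustration of that three-way test (simplified, not HotSpot code):

struct ToyFieldAddr {
  int      alias_idx;   // alias slice of the address
  unsigned instance_id; // which allocation the address belongs to
  long     offset;      // byte offset of the field within the object
};

// Array elements share one alias_idx, so offset and instance_id must
// also match before a store can be taken as the searched-for value.
static bool store_matches(const ToyFieldAddr& store, const ToyFieldAddr& wanted) {
  return store.alias_idx   == wanted.alias_idx &&
         store.instance_id == wanted.instance_id &&
         store.offset      == wanted.offset;
}
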
 339 
 340 // Generate loads from source of the arraycopy for fields of
 341 // destination needed at a deoptimization point
 342 Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type *ftype, AllocateNode *alloc) {
 343   BasicType bt = ft;
 344   const Type *type = ftype;
 345   if (ft == T_NARROWOOP) {
 346     bt = T_OBJECT;
 347     type = ftype->make_oopptr();
 348   }
 349   Node* res = NULL;
 350   if (ac->is_clonebasic()) {
 351     Node* base = ac->in(ArrayCopyNode::Src)->in(AddPNode::Base);
 352     Node* adr = _igvn.transform(new AddPNode(base, base, MakeConX(offset)));
 353     const TypePtr* adr_type = _igvn.type(base)->is_ptr()->add_offset(offset);
 354     res = LoadNode::make(_igvn, ctl, mem, adr, adr_type, type, bt, MemNode::unordered, LoadNode::Pinned);
 355   } else {
 356     if (ac->modifies(offset, offset, &_igvn, true)) {
 357       assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result");
 358       uint shift  = exact_log2(type2aelembytes(bt));
 359       Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
 360 #ifdef _LP64
 361       diff = _igvn.transform(new ConvI2LNode(diff));
 362 #endif
 363       diff = _igvn.transform(new LShiftXNode(diff, intcon(shift)));
 364 
 365       Node* off = _igvn.transform(new AddXNode(MakeConX(offset), diff));
 366       Node* base = ac->in(ArrayCopyNode::Src);
 367       Node* adr = _igvn.transform(new AddPNode(base, base, off));
 368       const TypePtr* adr_type = _igvn.type(base)->is_ptr()->add_offset(offset);









 369       res = LoadNode::make(_igvn, ctl, mem, adr, adr_type, type, bt, MemNode::unordered, LoadNode::Pinned);
 370     }
 371   }
 372   if (res != NULL) {
 373     res = _igvn.transform(res);
 374     if (ftype->isa_narrowoop()) {
 375       // PhaseMacroExpand::scalar_replacement adds DecodeN nodes

 376       res = _igvn.transform(new EncodePNode(res, ftype));
 377     }
 378     return res;
 379   }
 380   return NULL;
 381 }
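
The offset arithmetic in the non-clone branch above can be made concrete with a small sketch (illustrative only, not HotSpot code): the field at destination offset off was copied from source offset off + ((src_pos - dest_pos) << log2(element_size)).

#include <cstdint>

// Compute where in the source array the bytes at destination offset
// 'dest_off' came from, given the arraycopy positions and element size.
static intptr_t arraycopy_src_offset(intptr_t dest_off, int src_pos, int dest_pos,
                                     int log2_elem_size) {
  intptr_t diff = (intptr_t)(src_pos - dest_pos) << log2_elem_size;
  return dest_off + diff;
}

// Example: copying an int[] from src_pos = 2 to dest_pos = 0 with a 16-byte
// array base (base offsets vary by platform; this value is only illustrative):
// the destination element at offset 16 was loaded from source offset
// 16 + (2 - 0) * 4 = 24.
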
 382 
 383 //
 384 // Given a Memory Phi, compute a value Phi containing the values from stores
 385 // on the input paths.
  386 // Note: this function is recursive; its depth is limited by the "level" argument.
 387 // Returns the computed Phi, or NULL if it cannot compute it.
 388 Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
 389   assert(mem->is_Phi(), "sanity");
 390   int alias_idx = C->get_alias_index(adr_t);
 391   int offset = adr_t->offset();
 392   int instance_id = adr_t->instance_id();
 393 
 394   // Check if an appropriate value phi already exists.
 395   Node* region = mem->in(0);
 396   for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
 397     Node* phi = region->fast_out(k);
 398     if (phi->is_Phi() && phi != mem &&
 399         phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
 400       return phi;
 401     }
 402   }
 403   // Check if an appropriate new value phi already exists.
 404   Node* new_phi = value_phis->find(mem->_idx);
 405   if (new_phi != NULL)
 406     return new_phi;
 407 
 408   if (level <= 0) {
 409     return NULL; // Give up: phi tree too deep
 410   }
 411   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
 412   Node *alloc_mem = alloc->in(TypeFunc::Memory);
 413 
 414   uint length = mem->req();
 415   GrowableArray <Node *> values(length, length, NULL, false);
 416 
 417   // create a new Phi for the value
 418   PhiNode *phi = new PhiNode(mem->in(0), phi_type, NULL, mem->_idx, instance_id, alias_idx, offset);
 419   transform_later(phi);
 420   value_phis->push(phi, mem->_idx);
 421 
 422   for (uint j = 1; j < length; j++) {
 423     Node *in = mem->in(j);
 424     if (in == NULL || in->is_top()) {
 425       values.at_put(j, in);
 426     } else  {
 427       Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
 428       if (val == start_mem || val == alloc_mem) {
 429         // hit a sentinel, return appropriate 0 value
 430         values.at_put(j, _igvn.zerocon(ft));






 431         continue;
 432       }
 433       if (val->is_Initialize()) {
 434         val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 435       }
 436       if (val == NULL) {
 437         return NULL;  // can't find a value on this path
 438       }
 439       if (val == mem) {
 440         values.at_put(j, mem);
 441       } else if (val->is_Store()) {
 442         Node* n = val->in(MemNode::ValueIn);
 443         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 444         n = bs->step_over_gc_barrier(n);
 445         values.at_put(j, n);
 446       } else if(val->is_Proj() && val->in(0) == alloc) {
 447         values.at_put(j, _igvn.zerocon(ft));






 448       } else if (val->is_Phi()) {
 449         val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
 450         if (val == NULL) {
 451           return NULL;
 452         }
 453         values.at_put(j, val);
 454       } else if (val->Opcode() == Op_SCMemProj) {
 455         assert(val->in(0)->is_LoadStore() ||
 456                val->in(0)->Opcode() == Op_EncodeISOArray ||
 457                val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
 458         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 459         return NULL;
 460       } else if (val->is_ArrayCopy()) {
 461         Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
 462         if (res == NULL) {
 463           return NULL;
 464         }
 465         values.at_put(j, res);
 466       } else {
 467 #ifdef ASSERT


 473     }
 474   }
 475   // Set Phi's inputs
 476   for (uint j = 1; j < length; j++) {
 477     if (values.at(j) == mem) {
 478       phi->init_req(j, phi);
 479     } else {
 480       phi->init_req(j, values.at(j));
 481     }
 482   }
 483   return phi;
 484 }
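
The shape of this recursion, depth-limited by "level", memoized through value_phis so cycles terminate, and rolled back by the caller if any path fails, can be sketched in isolation (simplified, not HotSpot code):

#include <cstddef>
#include <map>
#include <vector>

struct ToyMerge;                      // stands in for a memory Phi / Region
struct ToyValue { int dummy; };       // stands in for the value Phi being built

struct ToyMerge {
  std::vector<ToyMerge*> preds;       // NULL stands in for a path with a known value
};

static ToyValue* value_from_merge(ToyMerge* m, std::map<ToyMerge*, ToyValue*>& memo,
                                  int level) {
  std::map<ToyMerge*, ToyValue*>::iterator it = memo.find(m);
  if (it != memo.end()) return it->second; // reuse the value created for this merge
  if (level <= 0) return NULL;             // phi tree too deep, give up
  ToyValue* result = new ToyValue();
  memo[m] = result;                        // record before recursing so loops terminate
  for (size_t i = 0; i < m->preds.size(); i++) {
    ToyMerge* p = m->preds[i];
    if (p != NULL && value_from_merge(p, memo, level - 1) == NULL) {
      return NULL;                         // caller discards all partially built values
    }
  }
  return result;
}
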
 485 
  486 // Search for the last value stored into the object's field.
 487 Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc) {
 488   assert(adr_t->is_known_instance_field(), "instance required");
 489   int instance_id = adr_t->instance_id();
 490   assert((uint)instance_id == alloc->_idx, "wrong allocation");
 491 
 492   int alias_idx = C->get_alias_index(adr_t);
 493   int offset = adr_t->offset();
 494   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
 495   Node *alloc_ctrl = alloc->in(TypeFunc::Control);
 496   Node *alloc_mem = alloc->in(TypeFunc::Memory);
 497   Arena *a = Thread::current()->resource_area();
 498   VectorSet visited(a);
 499 
 500 
 501   bool done = sfpt_mem == alloc_mem;
 502   Node *mem = sfpt_mem;
 503   while (!done) {
 504     if (visited.test_set(mem->_idx)) {
 505       return NULL;  // found a loop, give up
 506     }
 507     mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
 508     if (mem == start_mem || mem == alloc_mem) {
 509       done = true;  // hit a sentinel, return appropriate 0 value
 510     } else if (mem->is_Initialize()) {
 511       mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 512       if (mem == NULL) {
  513         done = true; // Something went wrong.
 514       } else if (mem->is_Store()) {
 515         const TypePtr* atype = mem->as_Store()->adr_type();
 516         assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
 517         done = true;
 518       }
 519     } else if (mem->is_Store()) {
 520       const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
 521       assert(atype != NULL, "address type must be oopptr");
 522       assert(C->get_alias_index(atype) == alias_idx &&
 523              atype->is_known_instance_field() && atype->offset() == offset &&
 524              atype->instance_id() == instance_id, "store is correct memory slice");
 525       done = true;
 526     } else if (mem->is_Phi()) {
 527       // try to find a phi's unique input
 528       Node *unique_input = NULL;
 529       Node *top = C->top();
 530       for (uint i = 1; i < mem->req(); i++) {
 531         Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
 532         if (n == NULL || n == top || n == mem) {
 533           continue;
 534         } else if (unique_input == NULL) {
 535           unique_input = n;
 536         } else if (unique_input != n) {
 537           unique_input = top;
 538           break;
 539         }
 540       }
 541       if (unique_input != NULL && unique_input != top) {
 542         mem = unique_input;
 543       } else {
 544         done = true;
 545       }
 546     } else if (mem->is_ArrayCopy()) {
 547       done = true;
 548     } else {
 549       assert(false, "unexpected node");
 550     }
 551   }
 552   if (mem != NULL) {
 553     if (mem == start_mem || mem == alloc_mem) {
 554       // hit a sentinel, return appropriate 0 value





 555       return _igvn.zerocon(ft);
 556     } else if (mem->is_Store()) {
 557       Node* n = mem->in(MemNode::ValueIn);
 558       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 559       n = bs->step_over_gc_barrier(n);
 560       return n;
 561     } else if (mem->is_Phi()) {
 562       // attempt to produce a Phi reflecting the values on the input paths of the Phi
 563       Node_Stack value_phis(a, 8);
 564       Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
 565       if (phi != NULL) {
 566         return phi;
 567       } else {
 568         // Kill all new Phis
 569         while(value_phis.is_nonempty()) {
 570           Node* n = value_phis.node();
 571           _igvn.replace_node(n, C->top());
 572           value_phis.pop();
 573         }
 574       }
 575     } else if (mem->is_ArrayCopy()) {
 576       Node* ctl = mem->in(0);
 577       Node* m = mem->in(TypeFunc::Memory);
 578       if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 579         // pin the loads in the uncommon trap path
 580         ctl = sfpt_ctl;
 581         m = sfpt_mem;
 582       }
 583       return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc);
 584     }
 585   }
  586   // Something went wrong.
 587   return NULL;
 588 }
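
A standalone sketch of the overall search value_from_mem performs (simplified, not HotSpot code): walk the memory chain from the safepoint back toward the allocation and report either the last store to the tracked field, the default zero if the allocation's initial memory is reached first, or "unknown" when the chain cannot be seen through.

#include <cstdint>

struct ToyMemState {
  enum Kind { Store, AllocInit, Opaque } kind;
  intptr_t     offset;  // valid for Store: field offset written
  int          value;   // valid for Store: value written
  ToyMemState* prev;    // older memory state
};

enum class FieldValue { Known, Zero, Unknown };

static FieldValue value_from_mem_sketch(ToyMemState* mem, intptr_t offset, int* out) {
  for (ToyMemState* m = mem; m != NULL; m = m->prev) {
    if (m->kind == ToyMemState::Store && m->offset == offset) {
      *out = m->value;               // the youngest store to this field wins
      return FieldValue::Known;
    }
    if (m->kind == ToyMemState::AllocInit) {
      *out = 0;                      // reached the allocation: field still holds its default
      return FieldValue::Zero;
    }
    if (m->kind == ToyMemState::Opaque) {
      return FieldValue::Unknown;    // e.g. a LoadStore or call we cannot look through
    }
  }
  return FieldValue::Unknown;
}
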
 589 





































 590 // Check the possibility of scalar replacement.
 591 bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
 592   //  Scan the uses of the allocation to check for anything that would
 593   //  prevent us from eliminating it.
 594   NOT_PRODUCT( const char* fail_eliminate = NULL; )
 595   DEBUG_ONLY( Node* disq_node = NULL; )
 596   bool  can_eliminate = true;
 597 
 598   Node* res = alloc->result_cast();
 599   const TypeOopPtr* res_type = NULL;
 600   if (res == NULL) {
 601     // All users were eliminated.
 602   } else if (!res->is_CheckCastPP()) {
 603     NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
 604     can_eliminate = false;
 605   } else {
 606     res_type = _igvn.type(res)->isa_oopptr();
 607     if (res_type == NULL) {
  608       NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
 609       can_eliminate = false;


 625         const TypePtr* addp_type = _igvn.type(use)->is_ptr();
 626         int offset = addp_type->offset();
 627 
 628         if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
  629           NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
 630           can_eliminate = false;
 631           break;
 632         }
 633         for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
 634                                    k < kmax && can_eliminate; k++) {
 635           Node* n = use->fast_out(k);
 636           if (!n->is_Store() && n->Opcode() != Op_CastP2X &&
 637               SHENANDOAHGC_ONLY((!UseShenandoahGC || !ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(n)) &&)
 638               !(n->is_ArrayCopy() &&
 639                 n->as_ArrayCopy()->is_clonebasic() &&
 640                 n->in(ArrayCopyNode::Dest) == use)) {
 641             DEBUG_ONLY(disq_node = n;)
 642             if (n->is_Load() || n->is_LoadStore()) {
 643               NOT_PRODUCT(fail_eliminate = "Field load";)
 644             } else {
  645               NOT_PRODUCT(fail_eliminate = "Not store field reference";)
 646             }
 647             can_eliminate = false;
 648           }
 649         }
 650       } else if (use->is_ArrayCopy() &&
 651                  (use->as_ArrayCopy()->is_arraycopy_validated() ||
 652                   use->as_ArrayCopy()->is_copyof_validated() ||
 653                   use->as_ArrayCopy()->is_copyofrange_validated()) &&
 654                  use->in(ArrayCopyNode::Dest) == res) {
 655         // ok to eliminate
 656       } else if (use->is_SafePoint()) {
 657         SafePointNode* sfpt = use->as_SafePoint();
 658         if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
 659           // Object is passed as argument.
 660           DEBUG_ONLY(disq_node = use;)
 661           NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
 662           can_eliminate = false;
 663         }
 664         Node* sfptMem = sfpt->memory();
 665         if (sfptMem == NULL || sfptMem->is_top()) {
 666           DEBUG_ONLY(disq_node = use;)
 667           NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";)
 668           can_eliminate = false;
 669         } else {
 670           safepoints.append_if_missing(sfpt);
 671         }




 672       } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
 673         if (use->is_Phi()) {
 674           if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
 675             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 676           } else {
 677             NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
 678           }
 679           DEBUG_ONLY(disq_node = use;)
 680         } else {
 681           if (use->Opcode() == Op_Return) {
 682             NOT_PRODUCT(fail_eliminate = "Object is return value";)
  683           } else {
 684             NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
 685           }
 686           DEBUG_ONLY(disq_node = use;)
 687         }
 688         can_eliminate = false;



 689       }
 690     }
 691   }
 692 
 693 #ifndef PRODUCT
 694   if (PrintEliminateAllocations) {
 695     if (can_eliminate) {
 696       tty->print("Scalar ");
 697       if (res == NULL)
 698         alloc->dump();
 699       else
 700         res->dump();
 701     } else if (alloc->_is_scalar_replaceable) {
 702       tty->print("NotScalar (%s)", fail_eliminate);
 703       if (res == NULL)
 704         alloc->dump();
 705       else
 706         res->dump();
 707 #ifdef ASSERT
 708       if (disq_node != NULL) {


 731   Node* res = alloc->result_cast();
 732   assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
 733   const TypeOopPtr* res_type = NULL;
 734   if (res != NULL) { // Could be NULL when there are no users
 735     res_type = _igvn.type(res)->isa_oopptr();
 736   }
 737 
 738   if (res != NULL) {
 739     klass = res_type->klass();
 740     if (res_type->isa_instptr()) {
 741       // find the fields of the class which will be needed for safepoint debug information
 742       assert(klass->is_instance_klass(), "must be an instance klass.");
 743       iklass = klass->as_instance_klass();
 744       nfields = iklass->nof_nonstatic_fields();
 745     } else {
 746       // find the array's elements which will be needed for safepoint debug information
 747       nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
 748       assert(klass->is_array_klass() && nfields >= 0, "must be an array klass.");
 749       elem_type = klass->as_array_klass()->element_type();
 750       basic_elem_type = elem_type->basic_type();




 751       array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
 752       element_size = type2aelembytes(basic_elem_type);




 753     }
 754   }
 755   //
 756   // Process the safepoint uses
 757   //

 758   while (safepoints.length() > 0) {
 759     SafePointNode* sfpt = safepoints.pop();
 760     Node* mem = sfpt->memory();
 761     Node* ctl = sfpt->control();
 762     assert(sfpt->jvms() != NULL, "missed JVMS");
 763     // Fields of scalar objs are referenced only at the end
 764     // of regular debuginfo at the last (youngest) JVMS.
 765     // Record relative start index.
 766     uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
 767     SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type,
 768 #ifdef ASSERT
 769                                                  alloc,
 770 #endif
 771                                                  first_ind, nfields);
 772     sobj->init_req(0, C->root());
 773     transform_later(sobj);
 774 
 775     // Scan object's fields adding an input to the safepoint for each field.
 776     for (int j = 0; j < nfields; j++) {
 777       intptr_t offset;
 778       ciField* field = NULL;
 779       if (iklass != NULL) {
 780         field = iklass->nonstatic_field_at(j);
 781         offset = field->offset();
 782         elem_type = field->type();
 783         basic_elem_type = field->layout_type();

 784       } else {
 785         offset = array_base + j * (intptr_t)element_size;
 786       }
 787 
 788       const Type *field_type;
 789       // The next code is taken from Parse::do_get_xxx().
 790       if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) {
 791         if (!elem_type->is_loaded()) {
 792           field_type = TypeInstPtr::BOTTOM;
 793         } else if (field != NULL && field->is_static_constant()) {
 794           // This can happen if the constant oop is non-perm.
 795           ciObject* con = field->constant_value().as_object();
 796           // Do not "join" in the previous type; it doesn't add value,
 797           // and may yield a vacuous result if the field is of interface type.
 798           field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 799           assert(field_type != NULL, "field singleton type must be consistent");
 800         } else {
 801           field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
 802         }
 803         if (UseCompressedOops) {
 804           field_type = field_type->make_narrowoop();
 805           basic_elem_type = T_NARROWOOP;
 806         }
 807       } else {
 808         field_type = Type::get_const_basic_type(basic_elem_type);
 809       }
 810 
 811       const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();
 812 
 813       Node *field_val = value_from_mem(mem, ctl, basic_elem_type, field_type, field_addr_type, alloc);






 814       if (field_val == NULL) {
 815         // We weren't able to find a value for this field,
 816         // give up on eliminating this allocation.
 817 
 818         // Remove any extra entries we added to the safepoint.
 819         uint last = sfpt->req() - 1;
 820         for (int k = 0;  k < j; k++) {
 821           sfpt->del_req(last--);
 822         }
 823         _igvn._worklist.push(sfpt);
 824         // rollback processed safepoints
 825         while (safepoints_done.length() > 0) {
 826           SafePointNode* sfpt_done = safepoints_done.pop();
 827           // remove any extra entries we added to the safepoint
 828           last = sfpt_done->req() - 1;
 829           for (int k = 0;  k < nfields; k++) {
 830             sfpt_done->del_req(last--);
 831           }
 832           JVMState *jvms = sfpt_done->jvms();
 833           jvms->set_endoff(sfpt_done->req());


 859             tty->print("=== At SafePoint node %d can't find value of array element [%d]",
 860                        sfpt->_idx, j);
 861           }
 862           tty->print(", which prevents elimination of: ");
 863           if (res == NULL)
 864             alloc->dump();
 865           else
 866             res->dump();
 867         }
 868 #endif
 869         return false;
 870       }
 871       if (UseCompressedOops && field_type->isa_narrowoop()) {
 872         // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
  873         // to be able to scalar replace the allocation.
 874         if (field_val->is_EncodeP()) {
 875           field_val = field_val->in(1);
 876         } else {
 877           field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
 878         }



 879       }
 880       sfpt->add_req(field_val);
 881     }
 882     JVMState *jvms = sfpt->jvms();
 883     jvms->set_endoff(sfpt->req());
 884     // Now make a pass over the debug information replacing any references
 885     // to the allocated object with "sobj"
 886     int start = jvms->debug_start();
 887     int end   = jvms->debug_end();
 888     sfpt->replace_edges_in_range(res, sobj, start, end);
 889     _igvn._worklist.push(sfpt);
 890     safepoints_done.append_if_missing(sfpt); // keep it for rollback
 891   }





 892   return true;
 893 }
 894 
 895 static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
 896   Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
 897   Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
 898   if (ctl_proj != NULL) {
 899     igvn.replace_node(ctl_proj, n->in(0));
 900   }
 901   if (mem_proj != NULL) {
 902     igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
 903   }
 904 }
 905 
 906 // Process users of eliminated allocation.
 907 void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
 908   Node* res = alloc->result_cast();
 909   if (res != NULL) {
 910     for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
 911       Node *use = res->last_out(j);


 936             Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
 937             disconnect_projections(ac, _igvn);
 938             assert(alloc->in(0)->is_Proj() && alloc->in(0)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
 939             Node* membar_before = alloc->in(0)->in(0);
 940             disconnect_projections(membar_before->as_MemBar(), _igvn);
 941             if (membar_after->is_MemBar()) {
 942               disconnect_projections(membar_after->as_MemBar(), _igvn);
 943             }
 944           } else {
 945             eliminate_gc_barrier(n);
 946           }
 947           k -= (oc2 - use->outcnt());
 948         }
 949         _igvn.remove_dead_node(use);
 950       } else if (use->is_ArrayCopy()) {
 951         // Disconnect ArrayCopy node
 952         ArrayCopyNode* ac = use->as_ArrayCopy();
 953         assert(ac->is_arraycopy_validated() ||
 954                ac->is_copyof_validated() ||
 955                ac->is_copyofrange_validated(), "unsupported");
 956         CallProjections callprojs;
 957         ac->extract_projections(&callprojs, true);
 958 
 959         _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O));
 960         _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory));
 961         _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control));
 962 
 963         // Set control to top. IGVN will remove the remaining projections
 964         ac->set_req(0, top());
 965         ac->replace_edge(res, top());
 966 
 967         // Disconnect src right away: it can help find new
 968         // opportunities for allocation elimination
 969         Node* src = ac->in(ArrayCopyNode::Src);
 970         ac->replace_edge(src, top());
 971         // src can be top at this point if src and dest of the
 972         // arraycopy were the same
 973         if (src->outcnt() == 0 && !src->is_top()) {
 974           _igvn.remove_dead_node(src);
 975         }
 976 
 977         _igvn._worklist.push(ac);






 978       } else {
 979         eliminate_gc_barrier(use);
 980       }
 981       j -= (oc1 - res->outcnt());
 982     }
 983     assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
 984     _igvn.remove_dead_node(res);
 985   }
 986 
 987   //
 988   // Process other users of allocation's projections
 989   //
 990   if (_resproj != NULL && _resproj->outcnt() != 0) {
 991     // First disconnect stores captured by Initialize node.
 992     // If Initialize node is eliminated first in the following code,
 993     // it will kill such stores and DUIterator_Last will assert.
 994     for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax);  j < jmax; j++) {
 995       Node *use = _resproj->fast_out(j);
 996       if (use->is_AddP()) {
 997         // raw memory addresses used only by the initialization


1109     if (alloc->is_AllocateArray())
1110       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1111     else
1112       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1113   }
1114 #endif
1115 
1116   return true;
1117 }
1118 
1119 bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
1120   // EA should remove all uses of non-escaping boxing node.
1121   if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != NULL) {
1122     return false;
1123   }
1124 
1125   assert(boxing->result_cast() == NULL, "unexpected boxing node result");
1126 
1127   extract_call_projections(boxing);
1128 
1129   const TypeTuple* r = boxing->tf()->range();
1130   assert(r->cnt() > TypeFunc::Parms, "sanity");
1131   const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
1132   assert(t != NULL, "sanity");
1133 
1134   CompileLog* log = C->log();
1135   if (log != NULL) {
1136     log->head("eliminate_boxing type='%d'",
1137               log->identify(t->klass()));
1138     JVMState* p = boxing->jvms();
1139     while (p != NULL) {
1140       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1141       p = p->caller();
1142     }
1143     log->tail("eliminate_boxing");
1144   }
1145 
1146   process_users_of_allocation(boxing);
1147 
1148 #ifndef PRODUCT
1149   if (PrintEliminateAllocations) {


1269   Node *result_phi_i_o = NULL;
1270 
 1271   // The initial slow comparison is a size check; the comparison
 1272   // we want to do is a BoolTest::gt
1273   bool always_slow = false;
1274   int tv = _igvn.find_int_con(initial_slow_test, -1);
1275   if (tv >= 0) {
1276     always_slow = (tv == 1);
1277     initial_slow_test = NULL;
1278   } else {
1279     initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
1280   }
1281 
1282   if (C->env()->dtrace_alloc_probes() ||
1283       (!UseTLAB && !Universe::heap()->supports_inline_contig_alloc())) {
1284     // Force slow-path allocation
1285     always_slow = true;
1286     initial_slow_test = NULL;
1287   }
1288 
1289 
1290   enum { too_big_or_final_path = 1, need_gc_path = 2 };
1291   Node *slow_region = NULL;
1292   Node *toobig_false = ctrl;
1293 
1294   assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent");
1295   // generate the initial test if necessary
1296   if (initial_slow_test != NULL ) {
1297     slow_region = new RegionNode(3);
1298 

1299     // Now make the initial failure test.  Usually a too-big test but
1300     // might be a TRUE for finalizers or a fancy class check for
1301     // newInstance0.
1302     IfNode *toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
1303     transform_later(toobig_iff);
1304     // Plug the failing-too-big test into the slow-path region
1305     Node *toobig_true = new IfTrueNode( toobig_iff );
1306     transform_later(toobig_true);
1307     slow_region    ->init_req( too_big_or_final_path, toobig_true );
1308     toobig_false = new IfFalseNode( toobig_iff );
1309     transform_later(toobig_false);
1310   } else {         // No initial test, just fall into next case
1311     toobig_false = ctrl;
1312     debug_only(slow_region = NodeSentinel);
1313   }
1314 
1315   Node *slow_mem = mem;  // save the current memory state for slow path
1316   // generate the fast allocation code unless we know that the initial test will always go slow
1317   if (!always_slow) {
1318     // Fast path modifies only raw memory.
1319     if (mem->is_MergeMem()) {
1320       mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1321     }
1322 
1323     // allocate the Region and Phi nodes for the result
1324     result_region = new RegionNode(3);
1325     result_phi_rawmem = new PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1326     result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
1327     result_phi_i_o    = new PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
1328 
1329     // Grab regular I/O before optional prefetch may change it.
1330     // Slow-path does no I/O so just set it to the original I/O.
1331     result_phi_i_o->init_req(slow_result_path, i_o);
1332 
1333     Node* needgc_ctrl = NULL;
1334     // Name successful fast-path variables
1335     Node* fast_oop_ctrl;
1336     Node* fast_oop_rawmem;
1337 
1338     intx prefetch_lines = length != NULL ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
1339 
1340     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1341     Node* fast_oop = bs->obj_allocate(this, ctrl, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
1342                                       fast_oop_ctrl, fast_oop_rawmem,
1343                                       prefetch_lines);
1344 
1345     if (initial_slow_test) {
1346       slow_region->init_req(need_gc_path, needgc_ctrl);
1347       // This completes all paths into the slow merge point
1348       transform_later(slow_region);
1349     } else {                      // No initial slow path needed!
1350       // Just fall from the need-GC path straight into the VM call.
1351       slow_region = needgc_ctrl;
1352     }
1353 
1354     InitializeNode* init = alloc->initialization();
1355     fast_oop_rawmem = initialize_object(alloc,
1356                                         fast_oop_ctrl, fast_oop_rawmem, fast_oop,
1357                                         klass_node, length, size_in_bytes);
1358 
1359     // If initialization is performed by an array copy, any required
1360     // MemBarStoreStore was already added. If the object does not
1361     // escape no need for a MemBarStoreStore. If the object does not
1362     // escape in its initializer and memory barrier (MemBarStoreStore or
1363     // stronger) is already added at exit of initializer, also no need
1364     // for a MemBarStoreStore. Otherwise we need a MemBarStoreStore
1365     // so that stores that initialize this object can't be reordered
1366     // with a subsequent store that makes this object accessible by
1367     // other threads.
1368     // Other threads include java threads and JVM internal threads
1369     // (for example concurrent GC threads). Current concurrent GC


1564     assert(_ioproj_catchall->outcnt() == 0, "all uses must be deleted");
1565     _igvn.remove_dead_node(_ioproj_catchall);
1566   }
1567 
1568   // if we generated only a slow call, we are done
1569   if (always_slow) {
1570     // Now we can unhook i_o.
1571     if (result_phi_i_o->outcnt() > 1) {
1572       call->set_req(TypeFunc::I_O, top());
1573     } else {
1574       assert(result_phi_i_o->unique_ctrl_out() == call, "");
1575       // Case of new array with negative size known during compilation.
 1576       // The AllocateArrayNode::Ideal() optimization disconnects the unreachable
 1577       // code that follows, since the call to the runtime will throw an exception.
 1578       // As a result there will be no users of i_o after the call.
 1579       // Leave i_o attached to this call to avoid problems in the preceding graph.
1580     }
1581     return;
1582   }
1583 
1584 
1585   if (_fallthroughcatchproj != NULL) {
1586     ctrl = _fallthroughcatchproj->clone();
1587     transform_later(ctrl);
1588     _igvn.replace_node(_fallthroughcatchproj, result_region);
1589   } else {
1590     ctrl = top();
1591   }
1592   Node *slow_result;
1593   if (_resproj == NULL) {
1594     // no uses of the allocation result
1595     slow_result = top();
1596   } else {
1597     slow_result = _resproj->clone();
1598     transform_later(slow_result);
1599     _igvn.replace_node(_resproj, result_phi_rawoop);
1600   }
1601 
1602   // Plug slow-path into result merge point
1603   result_region    ->init_req( slow_result_path, ctrl );
1604   result_phi_rawoop->init_req( slow_result_path, slow_result);
1605   result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough );
1606   transform_later(result_region);
1607   transform_later(result_phi_rawoop);
1608   transform_later(result_phi_rawmem);
1609   transform_later(result_phi_i_o);
1610   // This completes all paths into the result merge point
1611 }
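
The control-flow shape built here, a fast inline path merged with a slow runtime-call path at result_region / result_phi_rawoop, is the IR analogue of the following standalone sketch (simplified, not HotSpot code): a thread-local bump-pointer fast path with a fallback call when the buffer is exhausted or the allocation is forced slow.

#include <cstddef>
#include <cstdlib>

struct ToyTlab {
  char* top;  // next free byte in the thread-local buffer
  char* end;  // end of the buffer
};

static void* toy_allocate(ToyTlab& tlab, size_t size_in_bytes, bool always_slow) {
  if (!always_slow && size_in_bytes <= (size_t)(tlab.end - tlab.top)) {
    void* obj = tlab.top;            // fast path: bump the thread-local pointer
    tlab.top += size_in_bytes;
    return obj;                      // both paths meet at the caller, like result_region
  }
  return std::malloc(size_in_bytes); // slow path: hand the request to the runtime
}
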
1612 
1613 
1614 // Helper for PhaseMacroExpand::expand_allocate_common.
1615 // Initializes the newly-allocated storage.
1616 Node*
1617 PhaseMacroExpand::initialize_object(AllocateNode* alloc,
1618                                     Node* control, Node* rawmem, Node* object,
1619                                     Node* klass_node, Node* length,
1620                                     Node* size_in_bytes) {
1621   InitializeNode* init = alloc->initialization();
1622   // Store the klass & mark bits
1623   Node* mark_node = NULL;
1624   // For now only enable fast locking for non-array types
1625   if (UseBiasedLocking && (length == NULL)) {
1626     mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
1627   } else {
1628     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));

















1629   }
1630   rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
1631 
1632   rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
1633   int header_size = alloc->minimum_header_size();  // conservatively small
1634 
1635   // Array length
1636   if (length != NULL) {         // Arrays need length field
1637     rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
1638     // conservatively small header size:
1639     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1640     ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
1641     if (k->is_array_klass())    // we know the exact header size in most cases:
1642       header_size = Klass::layout_helper_header_size(k->layout_helper());
1643   }
1644 
1645   // Clear the object body, if necessary.
1646   if (init == NULL) {
1647     // The init has somehow disappeared; be cautious and clear everything.
1648     //
1649     // This can happen if a node is allocated but an uncommon trap occurs
1650     // immediately.  In this case, the Initialize gets associated with the
1651     // trap, and may be placed in a different (outer) loop, if the Allocate
1652     // is in a loop.  If (this is rare) the inner loop gets unrolled, then
1653     // there can be two Allocates to one Initialize.  The answer in all these
1654     // edge cases is safety first.  It is always safe to clear immediately
1655     // within an Allocate, and then (maybe or maybe not) clear some more later.
1656     if (!(UseTLAB && ZeroTLAB)) {
1657       rawmem = ClearArrayNode::clear_memory(control, rawmem, object,


1658                                             header_size, size_in_bytes,
1659                                             &_igvn);
1660     }
1661   } else {
1662     if (!init->is_complete()) {
1663       // Try to win by zeroing only what the init does not store.
1664       // We can also try to do some peephole optimizations,
1665       // such as combining some adjacent subword stores.
1666       rawmem = init->complete_stores(control, rawmem, object,
1667                                      header_size, size_in_bytes, &_igvn);
1668     }
1669     // We have no more use for this link, since the AllocateNode goes away:
1670     init->set_req(InitializeNode::RawAddress, top());
1671     // (If we keep the link, it just confuses the register allocator,
1672     // who thinks he sees a real use of the address by the membar.)
1673   }
1674 
1675   return rawmem;
1676 }
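
The store order initialize_object establishes, mark word, then klass, then (for arrays) the length field, then a clear of the body unless the TLAB is pre-zeroed, can be pictured with a simplified standalone sketch (not HotSpot code; field widths and offsets are illustrative only):

#include <cstdint>
#include <cstring>

struct ToyHeader {
  uintptr_t mark;   // corresponds to oopDesc::mark_offset_in_bytes()
  uintptr_t klass;  // corresponds to oopDesc::klass_offset_in_bytes()
};

static void toy_initialize_object(void* obj, uintptr_t mark, uintptr_t klass,
                                  int header_size, int size_in_bytes, bool zero_tlab) {
  ToyHeader* h = static_cast<ToyHeader*>(obj);
  h->mark  = mark;   // store the mark bits first
  h->klass = klass;  // then the klass pointer (an array would store its length next)
  if (!zero_tlab) {
    // clear everything between the header and the end of the object
    std::memset(static_cast<char*>(obj) + header_size, 0, size_in_bytes - header_size);
  }
}
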
1677 


2018         // Replace old box node with new eliminated box for all users
2019         // of the same object and mark related locks as eliminated.
2020         mark_eliminated_box(box, obj);
2021       }
2022     }
2023   }
2024 }
2025 
 2026 // We have determined that this lock/unlock can be eliminated; we simply
 2027 // eliminate the node without expanding it.
2028 //
 2029 // Note:  The membars associated with the lock/unlock are currently not
2030 //        eliminated.  This should be investigated as a future enhancement.
2031 //
2032 bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
2033 
2034   if (!alock->is_eliminated()) {
2035     return false;
2036   }
2037 #ifdef ASSERT


2038   if (!alock->is_coarsened()) {
2039     // Check that new "eliminated" BoxLock node is created.
2040     BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
2041     assert(oldbox->is_eliminated(), "should be done already");
2042   }
2043 #endif
2044 
2045   alock->log_lock_optimization(C, "eliminate_lock");
2046 
2047 #ifndef PRODUCT
2048   if (PrintEliminateLocks) {
2049     if (alock->is_Lock()) {
2050       tty->print_cr("++++ Eliminated: %d Lock", alock->_idx);
2051     } else {
2052       tty->print_cr("++++ Eliminated: %d Unlock", alock->_idx);
2053     }
2054   }
2055 #endif
2056 
2057   Node* mem  = alock->in(TypeFunc::Memory);


2298     // region->in(2) is set to fast path - the object is locked to the current thread.
2299 
2300     slow_path->init_req(2, ctrl); // Capture slow-control
2301     slow_mem->init_req(2, fast_lock_mem_phi);
2302 
2303     transform_later(slow_path);
2304     transform_later(slow_mem);
2305     // Reset lock's memory edge.
2306     lock->set_req(TypeFunc::Memory, slow_mem);
2307 
2308   } else {
2309     region  = new RegionNode(3);
2310     // create a Phi for the memory state
2311     mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2312 
2313     // Optimize test; set region slot 2
2314     slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
2315     mem_phi->init_req(2, mem);
2316   }
2317 










































2318   // Make slow path call
2319   CallNode *call = make_slow_call((CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(),
2320                                   OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path,
2321                                   obj, box, NULL);
2322 
2323   extract_call_projections(call);
2324 
2325   // Slow path can only throw asynchronous exceptions, which are always
2326   // de-opted.  So the compiler thinks the slow-call can never throw an
2327   // exception.  If it DOES throw an exception we would need the debug
2328   // info removed first (since if it throws there is no monitor).
2329   assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
2330            _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
2331 
2332   // Capture slow path
2333   // disconnect fall-through projection from call and create a new one
2334   // hook up users of fall-through projection to region
2335   Node *slow_ctrl = _fallthroughproj->clone();
2336   transform_later(slow_ctrl);
2337   _igvn.hash_delete(_fallthroughproj);


2399   // No exceptions for unlocking
2400   // Capture slow path
2401   // disconnect fall-through projection from call and create a new one
2402   // hook up users of fall-through projection to region
2403   Node *slow_ctrl = _fallthroughproj->clone();
2404   transform_later(slow_ctrl);
2405   _igvn.hash_delete(_fallthroughproj);
2406   _fallthroughproj->disconnect_inputs(NULL, C);
2407   region->init_req(1, slow_ctrl);
2408   // region inputs are now complete
2409   transform_later(region);
2410   _igvn.replace_node(_fallthroughproj, region);
2411 
2412   Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory) );
2413   mem_phi->init_req(1, memproj );
2414   mem_phi->init_req(2, mem);
2415   transform_later(mem_phi);
2416   _igvn.replace_node(_memproj_fallthrough, mem_phi);
2417 }
2418 
2419 //---------------------------eliminate_macro_nodes----------------------
2420 // Eliminate scalar replaced allocations and associated locks.
2421 void PhaseMacroExpand::eliminate_macro_nodes() {
2422   if (C->macro_count() == 0)
2423     return;
2424 
2425   // First, attempt to eliminate locks
2426   int cnt = C->macro_count();
2427   for (int i=0; i < cnt; i++) {
2428     Node *n = C->macro_node(i);
2429     if (n->is_AbstractLock()) { // Lock and Unlock nodes
2430       // Before elimination mark all associated (same box and obj)
2431       // lock and unlock nodes.
2432       mark_eliminated_locking_nodes(n->as_AbstractLock());
2433     }
2434   }
2435   bool progress = true;
2436   while (progress) {
2437     progress = false;
2438     for (int i = C->macro_count(); i > 0; i--) {


2443         success = eliminate_locking_node(n->as_AbstractLock());
2444       }
2445       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2446       progress = progress || success;
2447     }
2448   }
2449   // Next, attempt to eliminate allocations
2450   _has_locks = false;
2451   progress = true;
2452   while (progress) {
2453     progress = false;
2454     for (int i = C->macro_count(); i > 0; i--) {
2455       Node * n = C->macro_node(i-1);
2456       bool success = false;
2457       debug_only(int old_macro_count = C->macro_count(););
2458       switch (n->class_id()) {
2459       case Node::Class_Allocate:
2460       case Node::Class_AllocateArray:
2461         success = eliminate_allocate_node(n->as_Allocate());
2462         break;
2463       case Node::Class_CallStaticJava:
2464         success = eliminate_boxing_node(n->as_CallStaticJava());



2465         break;

2466       case Node::Class_Lock:
2467       case Node::Class_Unlock:
2468         assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
2469         _has_locks = true;
2470         break;
2471       case Node::Class_ArrayCopy:
2472         break;
2473       case Node::Class_OuterStripMinedLoop:
2474         break;
2475       default:
2476         assert(n->Opcode() == Op_LoopLimit ||
2477                n->Opcode() == Op_Opaque1   ||
2478                n->Opcode() == Op_Opaque2   ||
2479                n->Opcode() == Op_Opaque3   ||
2480                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
2481                "unknown node type in macro list");
2482       }
2483       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2484       progress = progress || success;
2485     }


2495   // Make sure expansion will not cause node limit to be exceeded.
2496   // Worst case is a macro node gets expanded into about 200 nodes.
2497   // Allow 50% more for optimization.
2498   if (C->check_node_count(C->macro_count() * 300, "out of nodes before macro expansion" ) )
2499     return true;
2500 
2501   // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
2502   bool progress = true;
2503   while (progress) {
2504     progress = false;
2505     for (int i = C->macro_count(); i > 0; i--) {
2506       Node * n = C->macro_node(i-1);
2507       bool success = false;
2508       debug_only(int old_macro_count = C->macro_count(););
2509       if (n->Opcode() == Op_LoopLimit) {
2510         // Remove it from macro list and put on IGVN worklist to optimize.
2511         C->remove_macro_node(n);
2512         _igvn._worklist.push(n);
2513         success = true;
2514       } else if (n->Opcode() == Op_CallStaticJava) {
2515         // Remove it from macro list and put on IGVN worklist to optimize.
2516         C->remove_macro_node(n);
2517         _igvn._worklist.push(n);
2518         success = true;



2519       } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
2520         _igvn.replace_node(n, n->in(1));
2521         success = true;
2522 #if INCLUDE_RTM_OPT
2523       } else if ((n->Opcode() == Op_Opaque3) && ((Opaque3Node*)n)->rtm_opt()) {
2524         assert(C->profile_rtm(), "should be used only in rtm deoptimization code");
2525         assert((n->outcnt() == 1) && n->unique_out()->is_Cmp(), "");
2526         Node* cmp = n->unique_out();
2527 #ifdef ASSERT
2528         // Validate graph.
2529         assert((cmp->outcnt() == 1) && cmp->unique_out()->is_Bool(), "");
2530         BoolNode* bol = cmp->unique_out()->as_Bool();
2531         assert((bol->outcnt() == 1) && bol->unique_out()->is_If() &&
2532                (bol->_test._test == BoolTest::ne), "");
2533         IfNode* ifn = bol->unique_out()->as_If();
2534         assert((ifn->outcnt() == 2) &&
2535                ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change) != NULL, "");
2536 #endif
2537         Node* repl = n->in(1);
2538         if (!_has_locks) {


2578     int macro_count = C->macro_count();
2579     Node * n = C->macro_node(macro_count-1);
2580     assert(n->is_macro(), "only macro nodes expected here");
2581     if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
2582       // node is unreachable, so don't try to expand it
2583       C->remove_macro_node(n);
2584       continue;
2585     }
2586     switch (n->class_id()) {
2587     case Node::Class_Allocate:
2588       expand_allocate(n->as_Allocate());
2589       break;
2590     case Node::Class_AllocateArray:
2591       expand_allocate_array(n->as_AllocateArray());
2592       break;
2593     case Node::Class_Lock:
2594       expand_lock_node(n->as_Lock());
2595       break;
2596     case Node::Class_Unlock:
2597       expand_unlock_node(n->as_Unlock());




2598       break;
2599     default:
2600       assert(false, "unknown node type in macro list");
2601     }
2602     assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
2603     if (C->failing())  return true;
2604   }
2605 
2606   _igvn.set_delay_transform(false);
2607   _igvn.optimize();
2608   if (C->failing())  return true;
2609   return false;
2610 }


  30 #include "opto/addnode.hpp"
  31 #include "opto/arraycopynode.hpp"
  32 #include "opto/callnode.hpp"
  33 #include "opto/castnode.hpp"
  34 #include "opto/cfgnode.hpp"
  35 #include "opto/compile.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/graphKit.hpp"
  38 #include "opto/locknode.hpp"
  39 #include "opto/loopnode.hpp"
  40 #include "opto/macro.hpp"
  41 #include "opto/memnode.hpp"
  42 #include "opto/narrowptrnode.hpp"
  43 #include "opto/node.hpp"
  44 #include "opto/opaquenode.hpp"
  45 #include "opto/phaseX.hpp"
  46 #include "opto/rootnode.hpp"
  47 #include "opto/runtime.hpp"
  48 #include "opto/subnode.hpp"
  49 #include "opto/type.hpp"
  50 #include "opto/valuetypenode.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "utilities/macros.hpp"
  53 #if INCLUDE_G1GC
  54 #include "gc/g1/g1ThreadLocalData.hpp"
  55 #endif // INCLUDE_G1GC
  56 #if INCLUDE_SHENANDOAHGC
  57 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  58 #endif
  59 
  60 
  61 //
  62 // Replace any references to "oldref" in inputs to "use" with "newref".
  63 // Returns the number of replacements made.
  64 //
  65 int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  66   int nreplacements = 0;
  67   uint req = use->req();
  68   for (uint j = 0; j < use->len(); j++) {
  69     Node *uin = use->in(j);
  70     if (uin == oldref) {
  71       if (j < req)
  72         use->set_req(j, newref);
  73       else
  74         use->set_prec(j, newref);
  75       nreplacements++;
  76     } else if (j >= req && uin == NULL) {
  77       break;
  78     }
  79   }
  80   return nreplacements;
  81 }
  82 
  83 void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) {
  84   // Copy debug information and adjust JVMState information
  85   uint old_dbg_start = oldcall->tf()->domain_sig()->cnt();
  86   uint new_dbg_start = newcall->tf()->domain_sig()->cnt();
  87   int jvms_adj  = new_dbg_start - old_dbg_start;
  88   assert (new_dbg_start == newcall->req(), "argument count mismatch");
  89 
  90   // SafePointScalarObject node could be referenced several times in debug info.
  91   // Use Dict to record cloned nodes.
  92   Dict* sosn_map = new Dict(cmpkey,hashkey);
  93   for (uint i = old_dbg_start; i < oldcall->req(); i++) {
  94     Node* old_in = oldcall->in(i);
  95     // Clone old SafePointScalarObjectNodes, adjusting their field contents.
  96     if (old_in != NULL && old_in->is_SafePointScalarObject()) {
  97       SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
  98       uint old_unique = C->unique();
  99       Node* new_in = old_sosn->clone(sosn_map);
 100       if (old_unique != C->unique()) { // New node?
 101         new_in->set_req(0, C->root()); // reset control edge
 102         new_in = transform_later(new_in); // Register new node.
 103       }
 104       old_in = new_in;
 105     }
 106     newcall->add_req(old_in);


 261           if (call->as_ArrayCopy()->modifies(offset, offset, phase, false)) {
 262             return in;
 263           }
 264         }
 265         mem = in->in(TypeFunc::Memory);
 266       } else if (in->is_MemBar()) {
 267         ArrayCopyNode* ac = NULL;
 268         if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
 269           assert(ac != NULL && ac->is_clonebasic(), "Only basic clone is a non escaping clone");
 270           return ac;
 271         }
 272         mem = in->in(TypeFunc::Memory);
 273       } else {
 274         assert(false, "unexpected projection");
 275       }
 276     } else if (mem->is_Store()) {
 277       const TypePtr* atype = mem->as_Store()->adr_type();
 278       int adr_idx = phase->C->get_alias_index(atype);
 279       if (adr_idx == alias_idx) {
 280         assert(atype->isa_oopptr(), "address type must be oopptr");
 281         int adr_offset = atype->flattened_offset();
 282         uint adr_iid = atype->is_oopptr()->instance_id();
 283         // Array element references have the same alias_idx
 284         // but different offset and different instance_id.
 285         if (adr_offset == offset && adr_iid == alloc->_idx)
 286           return mem;
 287       } else {
 288         assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
 289       }
 290       mem = mem->in(MemNode::Memory);
 291     } else if (mem->is_ClearArray()) {
 292       if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
 293         // Cannot bypass initialization of the instance
 294         // we are looking for.
 295         debug_only(intptr_t offset;)
 296         assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
 297         InitializeNode* init = alloc->as_Allocate()->initialization();
 298         // We are looking for stored value, return Initialize node
 299         // or memory edge from Allocate node.
 300         if (init != NULL)
 301           return init;


 304       }
 305       // Otherwise skip it (the call updated 'mem' value).
 306     } else if (mem->Opcode() == Op_SCMemProj) {
 307       mem = mem->in(0);
 308       Node* adr = NULL;
 309       if (mem->is_LoadStore()) {
 310         adr = mem->in(MemNode::Address);
 311       } else {
 312         assert(mem->Opcode() == Op_EncodeISOArray ||
 313                mem->Opcode() == Op_StrCompressedCopy, "sanity");
 314         adr = mem->in(3); // Destination array
 315       }
 316       const TypePtr* atype = adr->bottom_type()->is_ptr();
 317       int adr_idx = phase->C->get_alias_index(atype);
 318       if (adr_idx == alias_idx) {
 319         DEBUG_ONLY(mem->dump();)
 320         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 321         return NULL;
 322       }
 323       mem = mem->in(MemNode::Memory);
 324     } else if (mem->Opcode() == Op_StrInflatedCopy) {
 325       Node* adr = mem->in(3); // Destination array
 326       const TypePtr* atype = adr->bottom_type()->is_ptr();
 327       int adr_idx = phase->C->get_alias_index(atype);
 328       if (adr_idx == alias_idx) {
 329         DEBUG_ONLY(mem->dump();)
 330         assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
 331         return NULL;
 332       }
 333       mem = mem->in(MemNode::Memory);
 334     } else {
 335       return mem;
 336     }
 337     assert(mem != orig_mem, "dead memory loop");
 338   }
 339 }
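
// A rough sketch of the memory chain walked above, assuming a scalar-replaceable
// allocation whose field at 'offset' is written once:
//
//   Allocate -> Initialize -> StoreX @offset -> ... -> safepoint memory state
//
// Starting from the safepoint's memory state, the loop steps backwards over nodes
// that cannot modify this field (raw stores, stores to a different offset or
// instance, ClearArray nodes handled by step_through(), SCMemProj of LoadStores
// on other slices) and stops at the matching store, the Initialize node, or the
// allocation's own memory input.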
 340 
 341 // Generate loads from source of the arraycopy for fields of
 342 // destination needed at a deoptimization point
 343 Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type *ftype, AllocateNode *alloc) {
 344   BasicType bt = ft;
 345   const Type *type = ftype;
 346   if (ft == T_NARROWOOP) {
 347     bt = T_OBJECT;
 348     type = ftype->make_oopptr();
 349   }
 350   Node* res = NULL;
 351   if (ac->is_clonebasic()) {
 352     Node* base = ac->in(ArrayCopyNode::Src)->in(AddPNode::Base);
 353     Node* adr = _igvn.transform(new AddPNode(base, base, MakeConX(offset)));
 354     const TypePtr* adr_type = _igvn.type(base)->is_ptr()->add_offset(offset);
 355     res = LoadNode::make(_igvn, ctl, mem, adr, adr_type, type, bt, MemNode::unordered, LoadNode::Pinned);
 356   } else {
 357     if (ac->modifies(offset, offset, &_igvn, true)) {
 358       assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result");
 359       uint shift = exact_log2(type2aelembytes(bt));
 360       Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
 361 #ifdef _LP64
 362       diff = _igvn.transform(new ConvI2LNode(diff));
 363 #endif
 364       diff = _igvn.transform(new LShiftXNode(diff, intcon(shift)));
 365 
 366       Node* off = _igvn.transform(new AddXNode(MakeConX(offset), diff));
 367       Node* base = ac->in(ArrayCopyNode::Src);
 368       Node* adr = _igvn.transform(new AddPNode(base, base, off));
 369       const TypePtr* adr_type = _igvn.type(base)->is_ptr();
 370       if (adr_type->isa_aryptr()) {
 371         // In the case of a flattened value type array, each field has its
 372         // own slice so we need to extract the field being accessed from
 373         // the address computation
 374         adr_type = adr_type->is_aryptr()->add_field_offset_and_offset(offset);
 375         adr = _igvn.transform(new CastPPNode(adr, adr_type));
 376       } else {
 377         adr_type = adr_type->add_offset(offset);
 378       }
 379       res = LoadNode::make(_igvn, ctl, mem, adr, adr_type, type, bt, MemNode::unordered, LoadNode::Pinned);
 380     }
 381   }
 382   if (res != NULL) {
 383     res = _igvn.transform(res);
 384     if (ftype->isa_narrowoop()) {
 385       // PhaseMacroExpand::scalar_replacement adds DecodeN nodes
 386       assert(res->isa_DecodeN(), "should be narrow oop");
 387       res = _igvn.transform(new EncodePNode(res, ftype));
 388     }
 389     return res;
 390   }
 391   return NULL;
 392 }
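
// For the non-clone case above, the loaded address works out to roughly
//
//   adr = src + offset + ((SrcPos - DestPos) << exact_log2(type2aelembytes(bt)))
//
// i.e. the field offset in the destination is translated back to the corresponding
// element of the source array before the pinned load is created.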
 393 
 394 //
 395 // Given a Memory Phi, compute a value Phi containing the values from stores
 396 // on the input paths.
 397 // Note: this function is recursive; its depth is limited by the "level" argument
 398 // Returns the computed Phi, or NULL if it cannot compute it.
 399 Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
 400   assert(mem->is_Phi(), "sanity");
 401   int alias_idx = C->get_alias_index(adr_t);
 402   int offset = adr_t->flattened_offset();
 403   int instance_id = adr_t->instance_id();
 404 
 405   // Check if an appropriate value phi already exists.
 406   Node* region = mem->in(0);
 407   for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
 408     Node* phi = region->fast_out(k);
 409     if (phi->is_Phi() && phi != mem &&
 410         phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
 411       return phi;
 412     }
 413   }
 414   // Check if an appropriate new value phi already exists.
 415   Node* new_phi = value_phis->find(mem->_idx);
 416   if (new_phi != NULL)
 417     return new_phi;
 418 
 419   if (level <= 0) {
 420     return NULL; // Give up: phi tree too deep
 421   }
 422   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
 423   Node *alloc_mem = alloc->in(TypeFunc::Memory);
 424 
 425   uint length = mem->req();
 426   GrowableArray <Node *> values(length, length, NULL, false);
 427 
 428   // create a new Phi for the value
 429   PhiNode *phi = new PhiNode(mem->in(0), phi_type, NULL, mem->_idx, instance_id, alias_idx, offset);
 430   transform_later(phi);
 431   value_phis->push(phi, mem->_idx);
 432 
 433   for (uint j = 1; j < length; j++) {
 434     Node *in = mem->in(j);
 435     if (in == NULL || in->is_top()) {
 436       values.at_put(j, in);
 437     } else  {
 438       Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
 439       if (val == start_mem || val == alloc_mem) {
 440         // hit a sentinel, return appropriate 0 value
 441         Node* default_value = alloc->in(AllocateNode::DefaultValue);
 442         if (default_value != NULL) {
 443           values.at_put(j, default_value);
 444         } else {
 445           assert(alloc->in(AllocateNode::RawDefaultValue) == NULL, "default value may not be null");
 446           values.at_put(j, _igvn.zerocon(ft));
 447         }
 448         continue;
 449       }
 450       if (val->is_Initialize()) {
 451         val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 452       }
 453       if (val == NULL) {
 454         return NULL;  // can't find a value on this path
 455       }
 456       if (val == mem) {
 457         values.at_put(j, mem);
 458       } else if (val->is_Store()) {
 459         Node* n = val->in(MemNode::ValueIn);
 460         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 461         n = bs->step_over_gc_barrier(n);
 462         values.at_put(j, n);
 463       } else if(val->is_Proj() && val->in(0) == alloc) {
 464         Node* default_value = alloc->in(AllocateNode::DefaultValue);
 465         if (default_value != NULL) {
 466           values.at_put(j, default_value);
 467         } else {
 468           assert(alloc->in(AllocateNode::RawDefaultValue) == NULL, "default value may not be null");
 469           values.at_put(j, _igvn.zerocon(ft));
 470         }
 471       } else if (val->is_Phi()) {
 472         val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
 473         if (val == NULL) {
 474           return NULL;
 475         }
 476         values.at_put(j, val);
 477       } else if (val->Opcode() == Op_SCMemProj) {
 478         assert(val->in(0)->is_LoadStore() ||
 479                val->in(0)->Opcode() == Op_EncodeISOArray ||
 480                val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
 481         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 482         return NULL;
 483       } else if (val->is_ArrayCopy()) {
 484         Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
 485         if (res == NULL) {
 486           return NULL;
 487         }
 488         values.at_put(j, res);
 489       } else {
 490 #ifdef ASSERT


 496     }
 497   }
 498   // Set Phi's inputs
 499   for (uint j = 1; j < length; j++) {
 500     if (values.at(j) == mem) {
 501       phi->init_req(j, phi);
 502     } else {
 503       phi->init_req(j, values.at(j));
 504     }
 505   }
 506   return phi;
 507 }
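
// A minimal illustration of the Phi built above: for a field assigned 1 on one
// side of a diamond and 2 on the other,
//
//        Region                              Region
//       /      \                            /      \
//   StoreI(1)  StoreI(2)     becomes    ConI(1)   ConI(2)
//       \      /                            \      /
//     memory Phi                       value Phi (returned)
//
// Each memory input is resolved via scan_mem_chain(); nested memory Phis recurse
// with a decremented 'level' until the search depth is exhausted.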
 508 
 509 // Search the last value stored into the object's field.
 510 Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc) {
 511   assert(adr_t->is_known_instance_field(), "instance required");
 512   int instance_id = adr_t->instance_id();
 513   assert((uint)instance_id == alloc->_idx, "wrong allocation");
 514 
 515   int alias_idx = C->get_alias_index(adr_t);
 516   int offset = adr_t->flattened_offset();
 517   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);

 518   Node *alloc_mem = alloc->in(TypeFunc::Memory);
 519   Arena *a = Thread::current()->resource_area();
 520   VectorSet visited(a);
 521 

 522   bool done = sfpt_mem == alloc_mem;
 523   Node *mem = sfpt_mem;
 524   while (!done) {
 525     if (visited.test_set(mem->_idx)) {
 526       return NULL;  // found a loop, give up
 527     }
 528     mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
 529     if (mem == start_mem || mem == alloc_mem) {
 530       done = true;  // hit a sentinel, return appropriate 0 value
 531     } else if (mem->is_Initialize()) {
 532       mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 533       if (mem == NULL) {
 534         done = true; // Something went wrong.
 535       } else if (mem->is_Store()) {
 536         const TypePtr* atype = mem->as_Store()->adr_type();
 537         assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
 538         done = true;
 539       }
 540     } else if (mem->is_Store()) {
 541       const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
 542       assert(atype != NULL, "address type must be oopptr");
 543       assert(C->get_alias_index(atype) == alias_idx &&
 544              atype->is_known_instance_field() && atype->flattened_offset() == offset &&
 545              atype->instance_id() == instance_id, "store is correct memory slice");
 546       done = true;
 547     } else if (mem->is_Phi()) {
 548       // try to find a phi's unique input
 549       Node *unique_input = NULL;
 550       Node *top = C->top();
 551       for (uint i = 1; i < mem->req(); i++) {
 552         Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
 553         if (n == NULL || n == top || n == mem) {
 554           continue;
 555         } else if (unique_input == NULL) {
 556           unique_input = n;
 557         } else if (unique_input != n) {
 558           unique_input = top;
 559           break;
 560         }
 561       }
 562       if (unique_input != NULL && unique_input != top) {
 563         mem = unique_input;
 564       } else {
 565         done = true;
 566       }
 567     } else if (mem->is_ArrayCopy()) {
 568       done = true;
 569     } else {
 570       assert(false, "unexpected node");
 571     }
 572   }
 573   if (mem != NULL) {
 574     if (mem == start_mem || mem == alloc_mem) {
 575       // hit a sentinel, return appropriate 0 value
 576       Node* default_value = alloc->in(AllocateNode::DefaultValue);
 577       if (default_value != NULL) {
 578         return default_value;
 579       }
 580       assert(alloc->in(AllocateNode::RawDefaultValue) == NULL, "default value may not be null");
 581       return _igvn.zerocon(ft);
 582     } else if (mem->is_Store()) {
 583       Node* n = mem->in(MemNode::ValueIn);
 584       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 585       n = bs->step_over_gc_barrier(n);
 586       return n;
 587     } else if (mem->is_Phi()) {
 588       // attempt to produce a Phi reflecting the values on the input paths of the Phi
 589       Node_Stack value_phis(a, 8);
 590       Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
 591       if (phi != NULL) {
 592         return phi;
 593       } else {
 594         // Kill all new Phis
 595         while(value_phis.is_nonempty()) {
 596           Node* n = value_phis.node();
 597           _igvn.replace_node(n, C->top());
 598           value_phis.pop();
 599         }
 600       }
 601     } else if (mem->is_ArrayCopy()) {
 602       Node* ctl = mem->in(0);
 603       Node* m = mem->in(TypeFunc::Memory);
 604       if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 605         // pin the loads in the uncommon trap path
 606         ctl = sfpt_ctl;
 607         m = sfpt_mem;
 608       }
 609       return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc);
 610     }
 611   }
 612   // Something went wrong.
 613   return NULL;
 614 }
 615 
 616 // Search the last value stored into the value type's fields.
 617 Node* PhaseMacroExpand::value_type_from_mem(Node* mem, Node* ctl, ciValueKlass* vk, const TypeAryPtr* adr_type, int offset, AllocateNode* alloc) {
 618   // Subtract the offset of the first field to account for the missing oop header
 619   offset -= vk->first_field_offset();
 620   // Create a new ValueTypeNode and retrieve the field values from memory
 621   ValueTypeNode* vt = ValueTypeNode::make_uninitialized(_igvn, vk)->as_ValueType();
 622   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); ++i) {
 623     ciType* field_type = vt->field_type(i);
 624     int field_offset = offset + vt->field_offset(i);
 625     // Each value type field has its own memory slice
 626     adr_type = adr_type->with_field_offset(field_offset);
 627     Node* value = NULL;
 628     if (vt->field_is_flattened(i)) {
 629       value = value_type_from_mem(mem, ctl, field_type->as_value_klass(), adr_type, field_offset, alloc);
 630     } else {
 631       const Type* ft = Type::get_const_type(field_type);
 632       BasicType bt = field_type->basic_type();
 633       if (UseCompressedOops && !is_java_primitive(bt)) {
 634         ft = ft->make_narrowoop();
 635         bt = T_NARROWOOP;
 636       }
 637       value = value_from_mem(mem, ctl, bt, ft, adr_type, alloc);
 638       if (value != NULL && ft->isa_narrowoop()) {
 639         assert(UseCompressedOops, "unexpected narrow oop");
 640         value = transform_later(new DecodeNNode(value, value->get_ptr_type()));
 641       }
 642     }
 643     if (value != NULL) {
 644       vt->set_field_value(i, value);
 645     } else {
 646       // We might have reached the TrackedInitializationLimit
 647       return NULL;
 648     }
 649   }
 650   return transform_later(vt);
 651 }
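
// Sketch of the slice selection above for a flattened field i (the incoming
// 'offset' has already been adjusted by -vk->first_field_offset()):
//
//   int field_offset = offset + vt->field_offset(i);
//   adr_type = adr_type->with_field_offset(field_offset);  // per-field slice
//
// Non-flattened fields are then recovered with value_from_mem(); flattened ones
// recurse into value_type_from_mem() with the adjusted base offset.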
 652 
 653 // Check the possibility of scalar replacement.
 654 bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
 655   //  Scan the uses of the allocation to check for anything that would
 656   //  prevent us from eliminating it.
 657   NOT_PRODUCT( const char* fail_eliminate = NULL; )
 658   DEBUG_ONLY( Node* disq_node = NULL; )
 659   bool  can_eliminate = true;
 660 
 661   Node* res = alloc->result_cast();
 662   const TypeOopPtr* res_type = NULL;
 663   if (res == NULL) {
 664     // All users were eliminated.
 665   } else if (!res->is_CheckCastPP()) {
 666     NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
 667     can_eliminate = false;
 668   } else {
 669     res_type = _igvn.type(res)->isa_oopptr();
 670     if (res_type == NULL) {
 671       NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
 672       can_eliminate = false;


 688         const TypePtr* addp_type = _igvn.type(use)->is_ptr();
 689         int offset = addp_type->offset();
 690 
 691         if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
 692           NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
 693           can_eliminate = false;
 694           break;
 695         }
 696         for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
 697                                    k < kmax && can_eliminate; k++) {
 698           Node* n = use->fast_out(k);
 699           if (!n->is_Store() && n->Opcode() != Op_CastP2X &&
 700               SHENANDOAHGC_ONLY((!UseShenandoahGC || !ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(n)) &&)
 701               !(n->is_ArrayCopy() &&
 702                 n->as_ArrayCopy()->is_clonebasic() &&
 703                 n->in(ArrayCopyNode::Dest) == use)) {
 704             DEBUG_ONLY(disq_node = n;)
 705             if (n->is_Load() || n->is_LoadStore()) {
 706               NOT_PRODUCT(fail_eliminate = "Field load";)
 707             } else {
 708               NOT_PRODUCT(fail_eliminate = "Not store field reference";)
 709             }
 710             can_eliminate = false;
 711           }
 712         }
 713       } else if (use->is_ArrayCopy() &&
 714                  (use->as_ArrayCopy()->is_arraycopy_validated() ||
 715                   use->as_ArrayCopy()->is_copyof_validated() ||
 716                   use->as_ArrayCopy()->is_copyofrange_validated()) &&
 717                  use->in(ArrayCopyNode::Dest) == res) {
 718         // ok to eliminate
 719       } else if (use->is_SafePoint()) {
 720         SafePointNode* sfpt = use->as_SafePoint();
 721         if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
 722           // Object is passed as argument.
 723           DEBUG_ONLY(disq_node = use;)
 724           NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
 725           can_eliminate = false;
 726         }
 727         Node* sfptMem = sfpt->memory();
 728         if (sfptMem == NULL || sfptMem->is_top()) {
 729           DEBUG_ONLY(disq_node = use;)
 730           NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";)
 731           can_eliminate = false;
 732         } else {
 733           safepoints.append_if_missing(sfpt);
 734         }
 735       } else if (use->is_ValueType() && use->isa_ValueType()->get_oop() == res) {
 736         // ok to eliminate
 737       } else if (use->is_Store()) {
 738         // store to mark word
 739       } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
 740         if (use->is_Phi()) {
 741           if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
 742             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 743           } else {
 744             NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
 745           }
 746           DEBUG_ONLY(disq_node = use;)
 747         } else {
 748           if (use->Opcode() == Op_Return) {
 749             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 750           } else {
 751             NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
 752           }
 753           DEBUG_ONLY(disq_node = use;)
 754         }
 755         can_eliminate = false;
 756       } else {
 757         assert(use->Opcode() == Op_CastP2X, "should be");
 758         assert(!use->has_out_with(Op_OrL), "should have been removed because oop is never null");
 759       }
 760     }
 761   }
 762 
 763 #ifndef PRODUCT
 764   if (PrintEliminateAllocations) {
 765     if (can_eliminate) {
 766       tty->print("Scalar ");
 767       if (res == NULL)
 768         alloc->dump();
 769       else
 770         res->dump();
 771     } else if (alloc->_is_scalar_replaceable) {
 772       tty->print("NotScalar (%s)", fail_eliminate);
 773       if (res == NULL)
 774         alloc->dump();
 775       else
 776         res->dump();
 777 #ifdef ASSERT
 778       if (disq_node != NULL) {


 801   Node* res = alloc->result_cast();
 802   assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
 803   const TypeOopPtr* res_type = NULL;
 804   if (res != NULL) { // Could be NULL when there are no users
 805     res_type = _igvn.type(res)->isa_oopptr();
 806   }
 807 
 808   if (res != NULL) {
 809     klass = res_type->klass();
 810     if (res_type->isa_instptr()) {
 811       // find the fields of the class which will be needed for safepoint debug information
 812       assert(klass->is_instance_klass(), "must be an instance klass.");
 813       iklass = klass->as_instance_klass();
 814       nfields = iklass->nof_nonstatic_fields();
 815     } else {
 816       // find the array's elements which will be needed for safepoint debug information
 817       nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
 818       assert(klass->is_array_klass() && nfields >= 0, "must be an array klass.");
 819       elem_type = klass->as_array_klass()->element_type();
 820       basic_elem_type = elem_type->basic_type();
 821       if (elem_type->is_valuetype() && !klass->is_value_array_klass()) {
 822         assert(basic_elem_type == T_VALUETYPE, "unexpected element basic type");
 823         basic_elem_type = T_OBJECT;
 824       }
 825       array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
 826       element_size = type2aelembytes(basic_elem_type);
 827       if (klass->is_value_array_klass()) {
 828         // Flattened value type array
 829         element_size = klass->as_value_array_klass()->element_byte_size();
 830       }
 831     }
 832   }
 833   //
 834   // Process the safepoint uses
 835   //
 836   Unique_Node_List value_worklist;
 837   while (safepoints.length() > 0) {
 838     SafePointNode* sfpt = safepoints.pop();
 839     Node* mem = sfpt->memory();
 840     Node* ctl = sfpt->control();
 841     assert(sfpt->jvms() != NULL, "missed JVMS");
 842     // Fields of scalar objs are referenced only at the end
 843     // of regular debuginfo at the last (youngest) JVMS.
 844     // Record relative start index.
 845     uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
 846     SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type,
 847 #ifdef ASSERT
 848                                                  alloc,
 849 #endif
 850                                                  first_ind, nfields);
 851     sobj->init_req(0, C->root());
 852     transform_later(sobj);
 853 
 854     // Scan object's fields adding an input to the safepoint for each field.
 855     for (int j = 0; j < nfields; j++) {
 856       intptr_t offset;
 857       ciField* field = NULL;
 858       if (iklass != NULL) {
 859         field = iklass->nonstatic_field_at(j);
 860         offset = field->offset();
 861         elem_type = field->type();
 862         basic_elem_type = field->layout_type();
 863         assert(!field->is_flattened(), "flattened value type fields should not have safepoint uses");
 864       } else {
 865         offset = array_base + j * (intptr_t)element_size;
 866       }
 867 
 868       const Type *field_type;
 869       // The following code is taken from Parse::do_get_xxx().
 870       if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) {
 871         if (!elem_type->is_loaded()) {
 872           field_type = TypeInstPtr::BOTTOM;
 873         } else if (field != NULL && field->is_static_constant()) {
 874           // This can happen if the constant oop is non-perm.
 875           ciObject* con = field->constant_value().as_object();
 876           // Do not "join" in the previous type; it doesn't add value,
 877           // and may yield a vacuous result if the field is of interface type.
 878           field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 879           assert(field_type != NULL, "field singleton type must be consistent");
 880         } else {
 881           field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
 882         }
 883         if (UseCompressedOops) {
 884           field_type = field_type->make_narrowoop();
 885           basic_elem_type = T_NARROWOOP;
 886         }
 887       } else {
 888         field_type = Type::get_const_basic_type(basic_elem_type);
 889       }
 890 
 891       Node* field_val = NULL;
 892       const TypeOopPtr* field_addr_type = res_type->add_offset(offset)->isa_oopptr();
 893       if (klass->is_value_array_klass()) {
 894         ciValueKlass* vk = elem_type->as_value_klass();
 895         assert(vk->flatten_array(), "must be flattened");
 896         field_val = value_type_from_mem(mem, ctl, vk, field_addr_type->isa_aryptr(), 0, alloc);
 897       } else {
 898         field_val = value_from_mem(mem, ctl, basic_elem_type, field_type, field_addr_type, alloc);
 899       }
 900       if (field_val == NULL) {
 901         // We weren't able to find a value for this field,
 902         // give up on eliminating this allocation.
 903 
 904         // Remove any extra entries we added to the safepoint.
 905         uint last = sfpt->req() - 1;
 906         for (int k = 0;  k < j; k++) {
 907           sfpt->del_req(last--);
 908         }
 909         _igvn._worklist.push(sfpt);
 910         // rollback processed safepoints
 911         while (safepoints_done.length() > 0) {
 912           SafePointNode* sfpt_done = safepoints_done.pop();
 913           // remove any extra entries we added to the safepoint
 914           last = sfpt_done->req() - 1;
 915           for (int k = 0;  k < nfields; k++) {
 916             sfpt_done->del_req(last--);
 917           }
 918           JVMState *jvms = sfpt_done->jvms();
 919           jvms->set_endoff(sfpt_done->req());


 945             tty->print("=== At SafePoint node %d can't find value of array element [%d]",
 946                        sfpt->_idx, j);
 947           }
 948           tty->print(", which prevents elimination of: ");
 949           if (res == NULL)
 950             alloc->dump();
 951           else
 952             res->dump();
 953         }
 954 #endif
 955         return false;
 956       }
 957       if (UseCompressedOops && field_type->isa_narrowoop()) {
 958         // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
 959         // to be able to scalar replace the allocation.
 960         if (field_val->is_EncodeP()) {
 961           field_val = field_val->in(1);
 962         } else {
 963           field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
 964         }
 965       } else if (field_val->is_ValueType()) {
 966         // Keep track of value types to scalarize them later
 967         value_worklist.push(field_val);
 968       }
 969       sfpt->add_req(field_val);
 970     }
 971     JVMState *jvms = sfpt->jvms();
 972     jvms->set_endoff(sfpt->req());
 973     // Now make a pass over the debug information replacing any references
 974     // to the allocated object with "sobj"
 975     int start = jvms->debug_start();
 976     int end   = jvms->debug_end();
 977     sfpt->replace_edges_in_range(res, sobj, start, end);
 978     _igvn._worklist.push(sfpt);
 979     safepoints_done.append_if_missing(sfpt); // keep it for rollback
 980   }
 981   // Scalarize value types that were added to the safepoint
 982   for (uint i = 0; i < value_worklist.size(); ++i) {
 983     Node* vt = value_worklist.at(i);
 984     vt->as_ValueType()->make_scalar_in_safepoints(&_igvn);
 985   }
 986   return true;
 987 }
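
// After the loop above succeeds, each safepoint describes the eliminated
// allocation roughly as
//
//   SafePointScalarObjectNode(res_type, first_ind, nfields)
//   field values appended at the end of the JVMS debug info (add_req above)
//
// and every debug edge that referenced the allocation's CheckCastPP result now
// points at the SafePointScalarObjectNode (replace_edges_in_range).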
 988 
 989 static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
 990   Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
 991   Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
 992   if (ctl_proj != NULL) {
 993     igvn.replace_node(ctl_proj, n->in(0));
 994   }
 995   if (mem_proj != NULL) {
 996     igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
 997   }
 998 }
 999 
1000 // Process users of eliminated allocation.
1001 void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
1002   Node* res = alloc->result_cast();
1003   if (res != NULL) {
1004     for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
1005       Node *use = res->last_out(j);


1030             Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
1031             disconnect_projections(ac, _igvn);
1032             assert(alloc->in(0)->is_Proj() && alloc->in(0)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
1033             Node* membar_before = alloc->in(0)->in(0);
1034             disconnect_projections(membar_before->as_MemBar(), _igvn);
1035             if (membar_after->is_MemBar()) {
1036               disconnect_projections(membar_after->as_MemBar(), _igvn);
1037             }
1038           } else {
1039             eliminate_gc_barrier(n);
1040           }
1041           k -= (oc2 - use->outcnt());
1042         }
1043         _igvn.remove_dead_node(use);
1044       } else if (use->is_ArrayCopy()) {
1045         // Disconnect ArrayCopy node
1046         ArrayCopyNode* ac = use->as_ArrayCopy();
1047         assert(ac->is_arraycopy_validated() ||
1048                ac->is_copyof_validated() ||
1049                ac->is_copyofrange_validated(), "unsupported");
1050         CallProjections* callprojs = ac->extract_projections(true);

1051 
1052         _igvn.replace_node(callprojs->fallthrough_ioproj, ac->in(TypeFunc::I_O));
1053         _igvn.replace_node(callprojs->fallthrough_memproj, ac->in(TypeFunc::Memory));
1054         _igvn.replace_node(callprojs->fallthrough_catchproj, ac->in(TypeFunc::Control));
1055 
1056         // Set control to top. IGVN will remove the remaining projections
1057         ac->set_req(0, top());
1058         ac->replace_edge(res, top());
1059 
1060         // Disconnect src right away: it can help find new
1061         // opportunities for allocation elimination
1062         Node* src = ac->in(ArrayCopyNode::Src);
1063         ac->replace_edge(src, top());
1064         // src can be top at this point if src and dest of the
1065         // arraycopy were the same
1066         if (src->outcnt() == 0 && !src->is_top()) {
1067           _igvn.remove_dead_node(src);
1068         }
1069 
1070         _igvn._worklist.push(ac);
1071       } else if (use->is_ValueType()) {
1072         assert(use->isa_ValueType()->get_oop() == res, "unexpected value type use");
1073         _igvn.rehash_node_delayed(use);
1074         use->isa_ValueType()->set_oop(_igvn.zerocon(T_VALUETYPE));
1075       } else if (use->is_Store()) {
1076         _igvn.replace_node(use, use->in(MemNode::Memory));
1077       } else {
1078         eliminate_gc_barrier(use);
1079       }
1080       j -= (oc1 - res->outcnt());
1081     }
1082     assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
1083     _igvn.remove_dead_node(res);
1084   }
1085 
1086   //
1087   // Process other users of allocation's projections
1088   //
1089   if (_resproj != NULL && _resproj->outcnt() != 0) {
1090     // First disconnect stores captured by Initialize node.
1091     // If Initialize node is eliminated first in the following code,
1092     // it will kill such stores and DUIterator_Last will assert.
1093     for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax);  j < jmax; j++) {
1094       Node *use = _resproj->fast_out(j);
1095       if (use->is_AddP()) {
1096         // raw memory addresses used only by the initialization


1208     if (alloc->is_AllocateArray())
1209       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1210     else
1211       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1212   }
1213 #endif
1214 
1215   return true;
1216 }
1217 
1218 bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
1219   // EA should remove all uses of non-escaping boxing node.
1220   if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != NULL) {
1221     return false;
1222   }
1223 
1224   assert(boxing->result_cast() == NULL, "unexpected boxing node result");
1225 
1226   extract_call_projections(boxing);
1227 
1228   const TypeTuple* r = boxing->tf()->range_sig();
1229   assert(r->cnt() > TypeFunc::Parms, "sanity");
1230   const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
1231   assert(t != NULL, "sanity");
1232 
1233   CompileLog* log = C->log();
1234   if (log != NULL) {
1235     log->head("eliminate_boxing type='%d'",
1236               log->identify(t->klass()));
1237     JVMState* p = boxing->jvms();
1238     while (p != NULL) {
1239       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1240       p = p->caller();
1241     }
1242     log->tail("eliminate_boxing");
1243   }
1244 
1245   process_users_of_allocation(boxing);
1246 
1247 #ifndef PRODUCT
1248   if (PrintEliminateAllocations) {


1368   Node *result_phi_i_o = NULL;
1369 
1370   // The initial slow comparison is a size check; the comparison
1371   // we want to do is a BoolTest::gt
1372   bool always_slow = false;
1373   int tv = _igvn.find_int_con(initial_slow_test, -1);
1374   if (tv >= 0) {
1375     always_slow = (tv == 1);
1376     initial_slow_test = NULL;
1377   } else {
1378     initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
1379   }
1380 
1381   if (C->env()->dtrace_alloc_probes() ||
1382       (!UseTLAB && !Universe::heap()->supports_inline_contig_alloc())) {
1383     // Force slow-path allocation
1384     always_slow = true;
1385     initial_slow_test = NULL;
1386   }
1387 


1388   Node *slow_region = NULL;
1389   Node *toobig_false = ctrl;
1390 
1391   assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent");
1392   // generate the initial test if necessary
1393   if (initial_slow_test != NULL ) {
1394     if (slow_region == NULL) {
1395       slow_region = new RegionNode(1);
1396     }
1397     // Now make the initial failure test.  Usually a too-big test but
1398     // might be a TRUE for finalizers or a fancy class check for
1399     // newInstance0.
1400     IfNode* toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
1401     transform_later(toobig_iff);
1402     // Plug the failing-too-big test into the slow-path region
1403     Node* toobig_true = new IfTrueNode(toobig_iff);
1404     transform_later(toobig_true);
1405     slow_region    ->add_req(toobig_true);
1406     toobig_false = new IfFalseNode(toobig_iff);
1407     transform_later(toobig_false);
1408   } else {         // No initial test, just fall into next case
1409     toobig_false = ctrl;

1410   }
1411 
1412   Node *slow_mem = mem;  // save the current memory state for slow path
1413   // generate the fast allocation code unless we know that the initial test will always go slow
1414   if (!always_slow) {
1415     // Fast path modifies only raw memory.
1416     if (mem->is_MergeMem()) {
1417       mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1418     }
1419 
1420     // allocate the Region and Phi nodes for the result
1421     result_region = new RegionNode(3);
1422     result_phi_rawmem = new PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1423     result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
1424     result_phi_i_o    = new PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
1425 
1426     // Grab regular I/O before optional prefetch may change it.
1427     // Slow-path does no I/O so just set it to the original I/O.
1428     result_phi_i_o->init_req(slow_result_path, i_o);
1429 
1430     Node* needgc_ctrl = NULL;
1431     // Name successful fast-path variables
1432     Node* fast_oop_ctrl;
1433     Node* fast_oop_rawmem;
1434 
1435     intx prefetch_lines = length != NULL ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
1436 
1437     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1438     Node* fast_oop = bs->obj_allocate(this, ctrl, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
1439                                       fast_oop_ctrl, fast_oop_rawmem,
1440                                       prefetch_lines);
1441 
1442     if (slow_region != NULL) {
1443       slow_region->add_req(needgc_ctrl);
1444       // This completes all paths into the slow merge point
1445       transform_later(slow_region);
1446     } else {
1447       // Just fall from the need-GC path straight into the VM call.
1448       slow_region = needgc_ctrl;
1449     }
1450 
1451     InitializeNode* init = alloc->initialization();
1452     fast_oop_rawmem = initialize_object(alloc,
1453                                         fast_oop_ctrl, fast_oop_rawmem, fast_oop,
1454                                         klass_node, length, size_in_bytes);
1455 
1456     // If initialization is performed by an array copy, any required
1457     // MemBarStoreStore was already added. If the object does not
1458     // escape, no MemBarStoreStore is needed. If the object does not
1459     // escape in its initializer and a memory barrier (MemBarStoreStore or
1460     // stronger) is already added at the exit of the initializer, there is
1461     // also no need for a MemBarStoreStore. Otherwise we need a MemBarStoreStore
1462     // so that stores that initialize this object can't be reordered
1463     // with a subsequent store that makes this object accessible by
1464     // other threads.
1465     // Other threads include java threads and JVM internal threads
1466     // (for example concurrent GC threads). Current concurrent GC


1661     assert(_ioproj_catchall->outcnt() == 0, "all uses must be deleted");
1662     _igvn.remove_dead_node(_ioproj_catchall);
1663   }
1664 
1665   // if we generated only a slow call, we are done
1666   if (always_slow) {
1667     // Now we can unhook i_o.
1668     if (result_phi_i_o->outcnt() > 1) {
1669       call->set_req(TypeFunc::I_O, top());
1670     } else {
1671       assert(result_phi_i_o->unique_ctrl_out() == call, "");
1672       // Case of new array with negative size known during compilation.
1673       // AllocateArrayNode::Ideal() optimization disconnects the unreachable
1674       // code following the call since the runtime call will throw an exception.
1675       // As a result there will be no users of i_o after the call.
1676       // Leave i_o attached to this call to avoid problems in the preceding graph.
1677     }
1678     return;
1679   }
1680 

1681   if (_fallthroughcatchproj != NULL) {
1682     ctrl = _fallthroughcatchproj->clone();
1683     transform_later(ctrl);
1684     _igvn.replace_node(_fallthroughcatchproj, result_region);
1685   } else {
1686     ctrl = top();
1687   }
1688   Node *slow_result;
1689   if (_resproj == NULL) {
1690     // no uses of the allocation result
1691     slow_result = top();
1692   } else {
1693     slow_result = _resproj->clone();
1694     transform_later(slow_result);
1695     _igvn.replace_node(_resproj, result_phi_rawoop);
1696   }
1697 
1698   // Plug slow-path into result merge point
1699   result_region    ->init_req( slow_result_path, ctrl );
1700   result_phi_rawoop->init_req( slow_result_path, slow_result);
1701   result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough );
1702   transform_later(result_region);
1703   transform_later(result_phi_rawoop);
1704   transform_later(result_phi_rawmem);
1705   transform_later(result_phi_i_o);
1706   // This completes all paths into the result merge point
1707 }
1708 
1709 
1710 // Helper for PhaseMacroExpand::expand_allocate_common.
1711 // Initializes the newly-allocated storage.
1712 Node* PhaseMacroExpand::initialize_object(AllocateNode* alloc,
1713                                           Node* control, Node* rawmem, Node* object,
1714                                           Node* klass_node, Node* length,
1715                                           Node* size_in_bytes) {

1716   InitializeNode* init = alloc->initialization();
1717   // Store the klass & mark bits
1718   Node* mark_node = alloc->make_ideal_mark(&_igvn, object, control, rawmem, klass_node);
1719   if (!mark_node->is_Con()) {
1720     transform_later(mark_node);
1721   }
1722   rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());
1723 
1724   BasicType bt = T_METADATA;
1725   Node* metadata = klass_node;
1726   Node* properties = alloc->in(AllocateNode::StorageProperties);
1727   if (properties != NULL) {
1728     // Encode array storage properties into klass pointer
1729     assert(EnableValhalla, "array storage properties not supported");
1730     if (UseCompressedClassPointers) {
1731       // Compress the klass pointer before inserting the storage properties value
1732       metadata = transform_later(new EncodePKlassNode(metadata, metadata->bottom_type()->make_narrowklass()));
1733       metadata = transform_later(new CastN2INode(metadata));
1734       metadata = transform_later(new OrINode(metadata, transform_later(new ConvL2INode(properties))));
1735       bt = T_INT;
1736     } else {
1737       metadata = transform_later(new CastP2XNode(NULL, metadata));
1738       metadata = transform_later(new OrXNode(metadata, properties));
1739       bt = T_LONG;
1740     }
1741   }
1742   rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), metadata, bt);
1743 

1744   int header_size = alloc->minimum_header_size();  // conservatively small
1745 
1746   // Array length
1747   if (length != NULL) {         // Arrays need length field
1748     rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
1749     // conservatively small header size:
1750     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1751     ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
1752     if (k->is_array_klass())    // we know the exact header size in most cases:
1753       header_size = Klass::layout_helper_header_size(k->layout_helper());
1754   }
1755 
1756   // Clear the object body, if necessary.
1757   if (init == NULL) {
1758     // The init has somehow disappeared; be cautious and clear everything.
1759     //
1760     // This can happen if a node is allocated but an uncommon trap occurs
1761     // immediately.  In this case, the Initialize gets associated with the
1762     // trap, and may be placed in a different (outer) loop, if the Allocate
1763     // is in a loop.  If (this is rare) the inner loop gets unrolled, then
1764     // there can be two Allocates to one Initialize.  The answer in all these
1765     // edge cases is safety first.  It is always safe to clear immediately
1766     // within an Allocate, and then (maybe or maybe not) clear some more later.
1767     if (!(UseTLAB && ZeroTLAB)) {
1768       rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
1769                                             alloc->in(AllocateNode::DefaultValue),
1770                                             alloc->in(AllocateNode::RawDefaultValue),
1771                                             header_size, size_in_bytes,
1772                                             &_igvn);
1773     }
1774   } else {
1775     if (!init->is_complete()) {
1776       // Try to win by zeroing only what the init does not store.
1777       // We can also try to do some peephole optimizations,
1778       // such as combining some adjacent subword stores.
1779       rawmem = init->complete_stores(control, rawmem, object,
1780                                      header_size, size_in_bytes, &_igvn);
1781     }
1782     // We have no more use for this link, since the AllocateNode goes away:
1783     init->set_req(InitializeNode::RawAddress, top());
1784     // (If we keep the link, it just confuses the register allocator,
1785     // who thinks he sees a real use of the address by the membar.)
1786   }
1787 
1788   return rawmem;
1789 }
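
// Summary of the raw header stores emitted by initialize_object(), in order:
//
//   mark_offset_in_bytes()    mark word from make_ideal_mark()
//   klass_offset_in_bytes()   klass pointer, with array storage properties
//                             OR'ed in when they are present (Valhalla)
//   length_offset_in_bytes()  array length, arrays only
//
// followed by ClearArrayNode::clear_memory() or init->complete_stores() to zero
// (or default-value fill) the remainder of the object body.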
1790 


2131         // Replace old box node with new eliminated box for all users
2132         // of the same object and mark related locks as eliminated.
2133         mark_eliminated_box(box, obj);
2134       }
2135     }
2136   }
2137 }
2138 
2139 // We have determined that this lock/unlock can be eliminated; we simply
2140 // eliminate the node without expanding it.
2141 //
2142 // Note:  The membars associated with the lock/unlock are currently not
2143 //        eliminated.  This should be investigated as a future enhancement.
2144 //
2145 bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
2146 
2147   if (!alock->is_eliminated()) {
2148     return false;
2149   }
2150 #ifdef ASSERT
2151   const Type* obj_type = _igvn.type(alock->obj_node());
2152   assert(!obj_type->isa_valuetype() && !obj_type->is_valuetypeptr(), "Eliminating lock on value type");
2153   if (!alock->is_coarsened()) {
2154     // Check that new "eliminated" BoxLock node is created.
2155     BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
2156     assert(oldbox->is_eliminated(), "should be done already");
2157   }
2158 #endif
2159 
2160   alock->log_lock_optimization(C, "eliminate_lock");
2161 
2162 #ifndef PRODUCT
2163   if (PrintEliminateLocks) {
2164     if (alock->is_Lock()) {
2165       tty->print_cr("++++ Eliminated: %d Lock", alock->_idx);
2166     } else {
2167       tty->print_cr("++++ Eliminated: %d Unlock", alock->_idx);
2168     }
2169   }
2170 #endif
2171 
2172   Node* mem  = alock->in(TypeFunc::Memory);


2413     // region->in(2) is set to fast path - the object is locked to the current thread.
2414 
2415     slow_path->init_req(2, ctrl); // Capture slow-control
2416     slow_mem->init_req(2, fast_lock_mem_phi);
2417 
2418     transform_later(slow_path);
2419     transform_later(slow_mem);
2420     // Reset lock's memory edge.
2421     lock->set_req(TypeFunc::Memory, slow_mem);
2422 
2423   } else {
2424     region  = new RegionNode(3);
2425     // create a Phi for the memory state
2426     mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2427 
2428     // Optimize test; set region slot 2
2429     slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
2430     mem_phi->init_req(2, mem);
2431   }
2432 
2433   const TypeOopPtr* objptr = _igvn.type(obj)->make_oopptr();
2434   if (objptr->can_be_value_type()) {
2435     // Deoptimize and re-execute if a value type
2436     assert(EnableValhalla, "should only be used if value types are enabled");
2437     Node* mark = make_load(slow_path, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
2438     Node* value_mask = _igvn.MakeConX(markOopDesc::always_locked_pattern);
2439     Node* is_value = _igvn.transform(new AndXNode(mark, value_mask));
2440     Node* cmp = _igvn.transform(new CmpXNode(is_value, value_mask));
2441     Node* bol = _igvn.transform(new BoolNode(cmp, BoolTest::eq));
2442     Node* unc_ctrl = generate_slow_guard(&slow_path, bol, NULL);
2443 
2444     int trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_class_check, Deoptimization::Action_none);
2445     address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
2446     const TypePtr* no_memory_effects = NULL;
2447     JVMState* jvms = lock->jvms();
2448     CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap",
2449                                            jvms->bci(), no_memory_effects);
2450 
2451     unc->init_req(TypeFunc::Control, unc_ctrl);
2452     unc->init_req(TypeFunc::I_O, lock->i_o());
2453     unc->init_req(TypeFunc::Memory, mem); // may gc ptrs
2454     unc->init_req(TypeFunc::FramePtr,  lock->in(TypeFunc::FramePtr));
2455     unc->init_req(TypeFunc::ReturnAdr, lock->in(TypeFunc::ReturnAdr));
2456     unc->init_req(TypeFunc::Parms+0, _igvn.intcon(trap_request));
2457     unc->set_cnt(PROB_UNLIKELY_MAG(4));
2458     copy_call_debug_info(lock, unc);
2459 
2460     assert(unc->peek_monitor_box() == box, "wrong monitor");
2461     assert(unc->peek_monitor_obj() == obj, "wrong monitor");
2462 
2463     // pop monitor and push obj back on stack: we trap before the monitorenter
2464     unc->pop_monitor();
2465     unc->grow_stack(unc->jvms(), 1);
2466     unc->set_stack(unc->jvms(), unc->jvms()->stk_size()-1, obj);
2467 
2468     _igvn.register_new_node_with_optimizer(unc);
2469 
2470     Node* ctrl = _igvn.transform(new ProjNode(unc, TypeFunc::Control));
2471     Node* halt = _igvn.transform(new HaltNode(ctrl, lock->in(TypeFunc::FramePtr)));
2472     C->root()->add_req(halt);
2473   }
2474 
2475   // Make slow path call
2476   CallNode *call = make_slow_call((CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(),
2477                                   OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path,
2478                                   obj, box, NULL);
2479 
2480   extract_call_projections(call);
2481 
2482   // Slow path can only throw asynchronous exceptions, which are always
2483   // de-opted.  So the compiler thinks the slow-call can never throw an
2484   // exception.  If it DOES throw an exception we would need the debug
2485   // info removed first (since if it throws there is no monitor).
2486   assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
2487            _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
2488 
2489   // Capture slow path
2490   // disconnect fall-through projection from call and create a new one
2491   // hook up users of fall-through projection to region
2492   Node *slow_ctrl = _fallthroughproj->clone();
2493   transform_later(slow_ctrl);
2494   _igvn.hash_delete(_fallthroughproj);


2556   // No exceptions for unlocking
2557   // Capture slow path
2558   // disconnect fall-through projection from call and create a new one
2559   // hook up users of fall-through projection to region
2560   Node *slow_ctrl = _fallthroughproj->clone();
2561   transform_later(slow_ctrl);
2562   _igvn.hash_delete(_fallthroughproj);
2563   _fallthroughproj->disconnect_inputs(NULL, C);
2564   region->init_req(1, slow_ctrl);
2565   // region inputs are now complete
2566   transform_later(region);
2567   _igvn.replace_node(_fallthroughproj, region);
2568 
2569   Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory) );
2570   mem_phi->init_req(1, memproj );
2571   mem_phi->init_req(2, mem);
2572   transform_later(mem_phi);
2573   _igvn.replace_node(_memproj_fallthrough, mem_phi);
2574 }
2575 
2576 // A value type might be returned from the call but we don't know its
2577 // type. Either we get a buffered value (and nothing needs to be done)
2578 // or one of the values being returned is the klass of the value type
2579 // and we need to allocate a value type instance of that type and
2580 // initialize it with the other values being returned. In that case, we
2581 // first try a fast path allocation and initialize the value with the
2582 // value klass's pack handler or we fall back to a runtime call.
2583 void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) {
2584   assert(call->method()->is_method_handle_intrinsic(), "must be a method handle intrinsic call");
2585   Node* ret = call->proj_out_or_null(TypeFunc::Parms);
2586   if (ret == NULL) {
2587     return;
2588   }
2589   const TypeFunc* tf = call->_tf;
2590   const TypeTuple* domain = OptoRuntime::store_value_type_fields_Type()->domain_cc();
2591   const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain);
2592   call->_tf = new_tf;
2593   // Make sure the change of type is applied before projections are processed by igvn
2594   _igvn.set_type(call, call->Value(&_igvn));
2595   _igvn.set_type(ret, ret->Value(&_igvn));
2596 
2597   // Before any new projection is added:
2598   CallProjections* projs = call->extract_projections(true, true);
2599 
2600   Node* ctl = new Node(1);
2601   Node* mem = new Node(1);
2602   Node* io = new Node(1);
2603   Node* ex_ctl = new Node(1);
2604   Node* ex_mem = new Node(1);
2605   Node* ex_io = new Node(1);
2606   Node* res = new Node(1);
2607 
2608   Node* cast = transform_later(new CastP2XNode(ctl, res));
2609   Node* mask = MakeConX(0x1);
2610   Node* masked = transform_later(new AndXNode(cast, mask));
2611   Node* cmp = transform_later(new CmpXNode(masked, mask));
2612   Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
2613   IfNode* allocation_iff = new IfNode(ctl, bol, PROB_MAX, COUNT_UNKNOWN);
2614   transform_later(allocation_iff);
2615   Node* allocation_ctl = transform_later(new IfTrueNode(allocation_iff));
2616   Node* no_allocation_ctl = transform_later(new IfFalseNode(allocation_iff));
2617 
2618   Node* no_allocation_res = transform_later(new CheckCastPPNode(no_allocation_ctl, res, TypeInstPtr::BOTTOM));
2619 
2620   Node* mask2 = MakeConX(-2);
2621   Node* masked2 = transform_later(new AndXNode(cast, mask2));
2622   Node* rawklassptr = transform_later(new CastX2PNode(masked2));
2623   Node* klass_node = transform_later(new CheckCastPPNode(allocation_ctl, rawklassptr, TypeKlassPtr::OBJECT_OR_NULL));
2624 
2625   Node* slowpath_bol = NULL;
2626   Node* top_adr = NULL;
2627   Node* old_top = NULL;
2628   Node* new_top = NULL;
2629   if (UseTLAB) {
2630     Node* end_adr = NULL;
2631     set_eden_pointers(top_adr, end_adr);
2632     Node* end = make_load(ctl, mem, end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
2633     old_top = new LoadPNode(ctl, mem, top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered);
2634     transform_later(old_top);
2635     Node* layout_val = make_load(NULL, mem, klass_node, in_bytes(Klass::layout_helper_offset()), TypeInt::INT, T_INT);
2636     Node* size_in_bytes = ConvI2X(layout_val);
2637     new_top = new AddPNode(top(), old_top, size_in_bytes);
2638     transform_later(new_top);
2639     Node* slowpath_cmp = new CmpPNode(new_top, end);
2640     transform_later(slowpath_cmp);
2641     slowpath_bol = new BoolNode(slowpath_cmp, BoolTest::ge);
2642     transform_later(slowpath_bol);
2643   } else {
2644     slowpath_bol = intcon(1);
2645     top_adr = top();
2646     old_top = top();
2647     new_top = top();
2648   }
2649   IfNode* slowpath_iff = new IfNode(allocation_ctl, slowpath_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
2650   transform_later(slowpath_iff);
2651 
2652   Node* slowpath_true = new IfTrueNode(slowpath_iff);
2653   transform_later(slowpath_true);
2654 
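       // Slow path: call the store_value_type_fields stub which buffers the
       // value through the runtime when the inline allocation fails.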
2655   CallStaticJavaNode* slow_call = new CallStaticJavaNode(OptoRuntime::store_value_type_fields_Type(),
2656                                                          StubRoutines::store_value_type_fields_to_buf(),
2657                                                          "store_value_type_fields",
2658                                                          call->jvms()->bci(),
2659                                                          TypePtr::BOTTOM);
2660   slow_call->init_req(TypeFunc::Control, slowpath_true);
2661   slow_call->init_req(TypeFunc::Memory, mem);
2662   slow_call->init_req(TypeFunc::I_O, io);
2663   slow_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
2664   slow_call->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
2665   slow_call->init_req(TypeFunc::Parms, res);
2666 
2667   Node* slow_ctl = transform_later(new ProjNode(slow_call, TypeFunc::Control));
2668   Node* slow_mem = transform_later(new ProjNode(slow_call, TypeFunc::Memory));
2669   Node* slow_io = transform_later(new ProjNode(slow_call, TypeFunc::I_O));
2670   Node* slow_res = transform_later(new ProjNode(slow_call, TypeFunc::Parms));
2671   Node* slow_catc = transform_later(new CatchNode(slow_ctl, slow_io, 2));
2672   Node* slow_norm = transform_later(new CatchProjNode(slow_catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci));
2673   Node* slow_excp = transform_later(new CatchProjNode(slow_catc, CatchProjNode::catch_all_index,    CatchProjNode::no_handler_bci));
2674 
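       // The runtime call may throw, so merge its exception state with the
       // exception path of the original call.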
2675   Node* ex_r = new RegionNode(3);
2676   Node* ex_mem_phi = new PhiNode(ex_r, Type::MEMORY, TypePtr::BOTTOM);
2677   Node* ex_io_phi = new PhiNode(ex_r, Type::ABIO);
2678   ex_r->init_req(1, slow_excp);
2679   ex_mem_phi->init_req(1, slow_mem);
2680   ex_io_phi->init_req(1, slow_io);
2681   ex_r->init_req(2, ex_ctl);
2682   ex_mem_phi->init_req(2, ex_mem);
2683   ex_io_phi->init_req(2, ex_io);
2684 
2685   transform_later(ex_r);
2686   transform_later(ex_mem_phi);
2687   transform_later(ex_io_phi);
2688 
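       // Inline allocation succeeded: commit the new TLAB top and initialize the
       // object header (mark word with the always-locked pattern, klass field and,
       // with compressed class pointers, the klass gap).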
2689   Node* slowpath_false = new IfFalseNode(slowpath_iff);
2690   transform_later(slowpath_false);
2691   Node* rawmem = new StorePNode(slowpath_false, mem, top_adr, TypeRawPtr::BOTTOM, new_top, MemNode::unordered);
2692   transform_later(rawmem);
2693   Node* mark_node = makecon(TypeRawPtr::make((address)markOopDesc::always_locked_prototype()));
2694   rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
2695   rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
2696   if (UseCompressedClassPointers) {
2697     rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT);
2698   }
2699   Node* fixed_block  = make_load(slowpath_false, rawmem, klass_node, in_bytes(InstanceKlass::adr_valueklass_fixed_block_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2700   Node* pack_handler = make_load(slowpath_false, rawmem, fixed_block, in_bytes(ValueKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2701 
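       // Call the value klass's pack handler (an indirect leaf call) to copy the
       // field values from the return registers into the new buffer.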
2702   CallLeafNoFPNode* handler_call = new CallLeafNoFPNode(OptoRuntime::pack_value_type_Type(),
2703                                                         NULL,
2704                                                         "pack handler",
2705                                                         TypeRawPtr::BOTTOM);
2706   handler_call->init_req(TypeFunc::Control, slowpath_false);
2707   handler_call->init_req(TypeFunc::Memory, rawmem);
2708   handler_call->init_req(TypeFunc::I_O, top());
2709   handler_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
2710   handler_call->init_req(TypeFunc::ReturnAdr, top());
2711   handler_call->init_req(TypeFunc::Parms, pack_handler);
2712   handler_call->init_req(TypeFunc::Parms+1, old_top);
2713 
2714   // We don't know how many values are returned. This assumes the
2715   // worst case, that all available registers are used.
2716   for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) {
2717     if (domain->field_at(i) == Type::HALF) {
2718       slow_call->init_req(i, top());
2719       handler_call->init_req(i+1, top());
2720       continue;
2721     }
2722     Node* proj = transform_later(new ProjNode(call, i));
2723     slow_call->init_req(i, proj);
2724     handler_call->init_req(i+1, proj);
2725   }
2726 
2727   // The new slow call can safepoint, so give it the original call's debug info
2728   copy_call_debug_info(call, slow_call);
2729   transform_later(slow_call);
2730   transform_later(handler_call);
2731 
2732   Node* handler_ctl = transform_later(new ProjNode(handler_call, TypeFunc::Control));
2733   rawmem = transform_later(new ProjNode(handler_call, TypeFunc::Memory));
2734   Node* slowpath_false_res = transform_later(new ProjNode(handler_call, TypeFunc::Parms));
2735 
2736   MergeMemNode* slowpath_false_mem = MergeMemNode::make(mem);
2737   slowpath_false_mem->set_memory_at(Compile::AliasIdxRaw, rawmem);
2738   transform_later(slowpath_false_mem);
2739 
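       // Merge the three ways of producing the result: no buffering needed,
       // runtime slow call, and inline allocation followed by the pack handler.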
2740   Node* r = new RegionNode(4);
2741   Node* mem_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2742   Node* io_phi = new PhiNode(r, Type::ABIO);
2743   Node* res_phi = new PhiNode(r, TypeInstPtr::BOTTOM);
2744 
2745   r->init_req(1, no_allocation_ctl);
2746   mem_phi->init_req(1, mem);
2747   io_phi->init_req(1, io);
2748   res_phi->init_req(1, no_allocation_res);
2749   r->init_req(2, slow_norm);
2750   mem_phi->init_req(2, slow_mem);
2751   io_phi->init_req(2, slow_io);
2752   res_phi->init_req(2, slow_res);
2753   r->init_req(3, handler_ctl);
2754   mem_phi->init_req(3, slowpath_false_mem);
2755   io_phi->init_req(3, io);
2756   res_phi->init_req(3, slowpath_false_res);
2757 
2758   transform_later(r);
2759   transform_later(mem_phi);
2760   transform_later(io_phi);
2761   transform_later(res_phi);
2762 
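       // Rewire users of the original call's projections to the merged state and
       // replace the placeholder nodes by those projections so the new subgraph
       // is attached to the call.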
2763   assert(projs->nb_resproj == 1, "unexpected number of results");
2764   _igvn.replace_in_uses(projs->fallthrough_catchproj, r);
2765   _igvn.replace_in_uses(projs->fallthrough_memproj, mem_phi);
2766   _igvn.replace_in_uses(projs->fallthrough_ioproj, io_phi);
2767   _igvn.replace_in_uses(projs->resproj[0], res_phi);
2768   _igvn.replace_in_uses(projs->catchall_catchproj, ex_r);
2769   _igvn.replace_in_uses(projs->catchall_memproj, ex_mem_phi);
2770   _igvn.replace_in_uses(projs->catchall_ioproj, ex_io_phi);
2771 
2772   _igvn.replace_node(ctl, projs->fallthrough_catchproj);
2773   _igvn.replace_node(mem, projs->fallthrough_memproj);
2774   _igvn.replace_node(io, projs->fallthrough_ioproj);
2775   _igvn.replace_node(res, projs->resproj[0]);
2776   _igvn.replace_node(ex_ctl, projs->catchall_catchproj);
2777   _igvn.replace_node(ex_mem, projs->catchall_memproj);
2778   _igvn.replace_node(ex_io, projs->catchall_ioproj);
2779 }
2780 
2781 //---------------------------eliminate_macro_nodes----------------------
2782 // Eliminate scalar replaced allocations and associated locks.
2783 void PhaseMacroExpand::eliminate_macro_nodes() {
2784   if (C->macro_count() == 0)
2785     return;
2786 
2787   // First, attempt to eliminate locks
2788   int cnt = C->macro_count();
2789   for (int i = 0; i < cnt; i++) {
2790     Node *n = C->macro_node(i);
2791     if (n->is_AbstractLock()) { // Lock and Unlock nodes
2792       // Before elimination mark all associated (same box and obj)
2793       // lock and unlock nodes.
2794       mark_eliminated_locking_nodes(n->as_AbstractLock());
2795     }
2796   }
2797   bool progress = true;
2798   while (progress) {
2799     progress = false;
2800     for (int i = C->macro_count(); i > 0; i--) {
2801       Node * n = C->macro_node(i-1);
2802       bool success = false;
2803       debug_only(int old_macro_count = C->macro_count(););
2804       if (n->is_AbstractLock()) {
2805         success = eliminate_locking_node(n->as_AbstractLock());
2806       }
2807       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2808       progress = progress || success;
2809     }
2810   }
2811   // Next, attempt to eliminate allocations
2812   _has_locks = false;
2813   progress = true;
2814   while (progress) {
2815     progress = false;
2816     for (int i = C->macro_count(); i > 0; i--) {
2817       Node * n = C->macro_node(i-1);
2818       bool success = false;
2819       debug_only(int old_macro_count = C->macro_count(););
2820       switch (n->class_id()) {
2821       case Node::Class_Allocate:
2822       case Node::Class_AllocateArray:
2823         success = eliminate_allocate_node(n->as_Allocate());
2824         break;
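           // Boxing calls are CallStaticJava nodes as well, but value-returning
           // method handle intrinsics must be kept for later expansion.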
2825       case Node::Class_CallStaticJava: {
2826         CallStaticJavaNode* call = n->as_CallStaticJava();
2827         if (!call->method()->is_method_handle_intrinsic()) {
2828           success = eliminate_boxing_node(n->as_CallStaticJava());
2829         }
2830         break;
2831       }
2832       case Node::Class_Lock:
2833       case Node::Class_Unlock:
2834         assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
2835         _has_locks = true;
2836         break;
2837       case Node::Class_ArrayCopy:
2838         break;
2839       case Node::Class_OuterStripMinedLoop:
2840         break;
2841       default:
2842         assert(n->Opcode() == Op_LoopLimit ||
2843                n->Opcode() == Op_Opaque1   ||
2844                n->Opcode() == Op_Opaque2   ||
2845                n->Opcode() == Op_Opaque3   ||
2846                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
2847                "unknown node type in macro list");
2848       }
2849       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2850       progress = progress || success;
2851     }
2852   }
2853 }
2854 
2855 //---------------------------expand_macro_nodes----------------------
2856 //  Returns true if a failure occurred.
2857 bool PhaseMacroExpand::expand_macro_nodes() {
2858   // Last attempt to eliminate macro nodes.
2859   eliminate_macro_nodes();
2860 
2861   // Make sure expansion will not cause node limit to be exceeded.
2862   // Worst case is a macro node gets expanded into about 200 nodes.
2863   // Allow 50% more for optimization.
2864   if (C->check_node_count(C->macro_count() * 300, "out of nodes before macro expansion"))
2865     return true;
2866 
2867   // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
2868   bool progress = true;
2869   while (progress) {
2870     progress = false;
2871     for (int i = C->macro_count(); i > 0; i--) {
2872       Node * n = C->macro_node(i-1);
2873       bool success = false;
2874       debug_only(int old_macro_count = C->macro_count(););
2875       if (n->Opcode() == Op_LoopLimit) {
2876         // Remove it from macro list and put on IGVN worklist to optimize.
2877         C->remove_macro_node(n);
2878         _igvn._worklist.push(n);
2879         success = true;
2880       } else if (n->Opcode() == Op_CallStaticJava) {
2881         CallStaticJavaNode* call = n->as_CallStaticJava();
2882         if (!call->method()->is_method_handle_intrinsic()) {
2883           // Remove it from macro list and put on IGVN worklist to optimize.
2884           C->remove_macro_node(n);
2885           _igvn._worklist.push(n);
2886           success = true;
2887         }
2888       } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
2889         _igvn.replace_node(n, n->in(1));
2890         success = true;
2891 #if INCLUDE_RTM_OPT
2892       } else if ((n->Opcode() == Op_Opaque3) && ((Opaque3Node*)n)->rtm_opt()) {
2893         assert(C->profile_rtm(), "should be used only in rtm deoptimization code");
2894         assert((n->outcnt() == 1) && n->unique_out()->is_Cmp(), "");
2895         Node* cmp = n->unique_out();
2896 #ifdef ASSERT
2897         // Validate graph.
2898         assert((cmp->outcnt() == 1) && cmp->unique_out()->is_Bool(), "");
2899         BoolNode* bol = cmp->unique_out()->as_Bool();
2900         assert((bol->outcnt() == 1) && bol->unique_out()->is_If() &&
2901                (bol->_test._test == BoolTest::ne), "");
2902         IfNode* ifn = bol->unique_out()->as_If();
2903         assert((ifn->outcnt() == 2) &&
2904                ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change) != NULL, "");
2905 #endif
2906         Node* repl = n->in(1);
2907         if (!_has_locks) {


2947     int macro_count = C->macro_count();
2948     Node * n = C->macro_node(macro_count-1);
2949     assert(n->is_macro(), "only macro nodes expected here");
2950     if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
2951       // node is unreachable, so don't try to expand it
2952       C->remove_macro_node(n);
2953       continue;
2954     }
2955     switch (n->class_id()) {
2956     case Node::Class_Allocate:
2957       expand_allocate(n->as_Allocate());
2958       break;
2959     case Node::Class_AllocateArray:
2960       expand_allocate_array(n->as_AllocateArray());
2961       break;
2962     case Node::Class_Lock:
2963       expand_lock_node(n->as_Lock());
2964       break;
2965     case Node::Class_Unlock:
2966       expand_unlock_node(n->as_Unlock());
2967       break;
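         // Method handle intrinsic calls are the only CallStaticJava nodes still on
         // the macro list; expand the value type return buffering and remove the
         // node from the list (the call itself stays in the graph).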
2968     case Node::Class_CallStaticJava:
2969       expand_mh_intrinsic_return(n->as_CallStaticJava());
2970       C->remove_macro_node(n);
2971       break;
2972     default:
2973       assert(false, "unknown node type in macro list");
2974     }
2975     assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
2976     if (C->failing())  return true;
2977   }
2978 
2979   _igvn.set_delay_transform(false);
2980   _igvn.optimize();
2981   if (C->failing())  return true;
2982   return false;
2983 }