src/hotspot/share/opto/matcher.cpp

 417 //------------------------------Fixup_Save_On_Entry----------------------------
 418 // The stated purpose of this routine is to take care of save-on-entry
 419 // registers.  However, the overall goal of the Match phase is to convert into
 420 // machine-specific instructions which have RegMasks to guide allocation.
 421 // So what this procedure really does is put a valid RegMask on each input
 422 // to the machine-specific variations of all Return, TailCall and Halt
 423 // instructions.  It also adds edges to define the save-on-entry values (and of
 424 // course gives them a mask).
 425 
 426 static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
 427   RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
 428   // Do all the pre-defined register masks
 429   rms[TypeFunc::Control  ] = RegMask::Empty;
 430   rms[TypeFunc::I_O      ] = RegMask::Empty;
 431   rms[TypeFunc::Memory   ] = RegMask::Empty;
 432   rms[TypeFunc::ReturnAdr] = ret_adr;
 433   rms[TypeFunc::FramePtr ] = fp;
 434   return rms;
 435 }
 436 
 437 #define NOF_STACK_MASKS (3*13)
 438 
 439 // Create the initial stack mask used by values spilling to the stack.
 440 // Disallow any debug info in outgoing argument areas by setting the
 441 // initial mask accordingly.
 442 void Matcher::init_first_stack_mask() {
 443 
 444   // Allocate storage for spill masks as masks for the appropriate load type.
 445   RegMask *rms = (RegMask*)C->comp_arena()->AmallocWords(sizeof(RegMask) * NOF_STACK_MASKS);
 446 
 447   // Initialize empty placeholder masks into the newly allocated arena
 448   for (int i = 0; i < NOF_STACK_MASKS; i++) {
 449     new (rms + i) RegMask();
 450   }
 451 
 452   idealreg2spillmask  [Op_RegN] = &rms[0];
 453   idealreg2spillmask  [Op_RegI] = &rms[1];
 454   idealreg2spillmask  [Op_RegL] = &rms[2];
 455   idealreg2spillmask  [Op_RegF] = &rms[3];
 456   idealreg2spillmask  [Op_RegD] = &rms[4];

 525   *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
 526 #ifdef _LP64
 527   *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
 528    idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
 529    idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
 530 #else
 531    idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
 532 #endif
 533   *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
 534    idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
 535   *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
 536    idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
 537   *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
 538    idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
 539   *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
 540    idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
 541 
 542   if (Matcher::has_predicated_vectors()) {
 543     *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
 544      idealreg2spillmask[Op_RegVectMask]->OR(aligned_stack_mask);
 545   }
 546 
 547   if (Matcher::vector_size_supported(T_BYTE,4)) {
 548     *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
 549      idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
 550   } else {
 551     *idealreg2spillmask[Op_VecS] = RegMask::Empty;
 552   }
 553 
 554   if (Matcher::vector_size_supported(T_FLOAT,2)) {
 555     // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
 556     // RA guarantees such alignment since it is needed for Double and Long values.
 557     *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
 558      idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
 559   } else {
 560     *idealreg2spillmask[Op_VecD] = RegMask::Empty;
 561   }
 562 
 563   if (Matcher::vector_size_supported(T_FLOAT,4)) {
 564     // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.

 597   }
 598 
 599   if (Matcher::vector_size_supported(T_FLOAT,16)) {
 600     // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
 601     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
 602     for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
 603       aligned_stack_mask.Remove(in);
 604       in = OptoReg::add(in, -1);
 605     }
 606      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
 607      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 608     *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
 609      idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
 610   } else {
 611     *idealreg2spillmask[Op_VecZ] = RegMask::Empty;
 612   }
 613 
 614   if (Matcher::supports_scalable_vector()) {
 615     int k = 1;
 616     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
 617     // Exclude last input arg stack slots to avoid spilling vector register there,
 618     // otherwise vector spills could stomp over stack slots in caller frame.
 619     for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
 620       scalable_stack_mask.Remove(in);
 621       in = OptoReg::add(in, -1);
 622     }
 623 
 624     // For VecA
 625      scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
 626      assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
 627     *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
 628      idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
 629   } else {
 630     *idealreg2spillmask[Op_VecA] = RegMask::Empty;
 631   }
 632 
 633   if (UseFPUForSpilling) {
 634     // This mask logic assumes that the spill operations are
 635     // symmetric and that the registers involved are the same size.
 636     // On sparc for instance we may have to use 64 bit moves will

2211     case Op_Jump:
2212       mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
2213       mstack.push(n->in(0), Pre_Visit);     // Visit Control input
2214       return true;                             // while (mstack.is_nonempty())
2215     case Op_StrComp:
2216     case Op_StrEquals:
2217     case Op_StrIndexOf:
2218     case Op_StrIndexOfChar:
2219     case Op_AryEq:
2220     case Op_HasNegatives:
2221     case Op_StrInflatedCopy:
2222     case Op_StrCompressedCopy:
2223     case Op_EncodeISOArray:
2224     case Op_FmaD:
2225     case Op_FmaF:
2226     case Op_FmaVD:
2227     case Op_FmaVF:
2228     case Op_MacroLogicV:
2229     case Op_LoadVectorMasked:
2230     case Op_VectorCmpMasked:
2231       set_shared(n); // Force result into register (it will be anyways)
2232       break;
2233     case Op_ConP: {  // Convert pointers above the centerline to NUL
2234       TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2235       const TypePtr* tp = tn->type()->is_ptr();
2236       if (tp->_ptr == TypePtr::AnyNull) {
2237         tn->set_type(TypePtr::NULL_PTR);
2238       }
2239       break;
2240     }
2241     case Op_ConN: {  // Convert narrow pointers above the centerline to NUL
2242       TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2243       const TypePtr* tp = tn->type()->make_ptr();
2244       if (tp && tp->_ptr == TypePtr::AnyNull) {
2245         tn->set_type(TypeNarrowOop::NULL_PTR);
2246       }
2247       break;
2248     }
2249     case Op_Binary:         // These are introduced in the Post_Visit state.
2250       ShouldNotReachHere();

2256     default:
2257       if( n->is_Store() ) {
2258         // Do match stores, despite no ideal reg
2259         mem_op = true;
2260         break;
2261       }
2262       if( n->is_Mem() ) { // Loads and LoadStores
2263         mem_op = true;
2264         // Loads must be root of match tree due to prior load conflict
2265         if( C->subsume_loads() == false )
2266           set_shared(n);
2267       }
2268       // Fall into default case
2269       if( !n->ideal_reg() )
2270         set_dontcare(n);  // Unmatchable Nodes
2271   } // end_switch
2272   return false;
2273 }
2274 
2275 void Matcher::find_shared_post_visit(Node* n, uint opcode) {
2276   switch(opcode) {       // Handle some opcodes special
2277     case Op_StorePConditional:
2278     case Op_StoreIConditional:
2279     case Op_StoreLConditional:
2280     case Op_CompareAndExchangeB:
2281     case Op_CompareAndExchangeS:
2282     case Op_CompareAndExchangeI:
2283     case Op_CompareAndExchangeL:
2284     case Op_CompareAndExchangeP:
2285     case Op_CompareAndExchangeN:
2286     case Op_WeakCompareAndSwapB:
2287     case Op_WeakCompareAndSwapS:
2288     case Op_WeakCompareAndSwapI:
2289     case Op_WeakCompareAndSwapL:
2290     case Op_WeakCompareAndSwapP:
2291     case Op_WeakCompareAndSwapN:
2292     case Op_CompareAndSwapB:
2293     case Op_CompareAndSwapS:
2294     case Op_CompareAndSwapI:
2295     case Op_CompareAndSwapL:

2395       n->del_req(4);
2396       n->del_req(3);
2397       break;
2398     }
2399     case Op_CopySignD:
2400     case Op_SignumF:
2401     case Op_SignumD: {
2402       Node* pair = new BinaryNode(n->in(2), n->in(3));
2403       n->set_req(2, pair);
2404       n->del_req(3);
2405       break;
2406     }
2407     case Op_VectorBlend:
2408     case Op_VectorInsert: {
2409       Node* pair = new BinaryNode(n->in(1), n->in(2));
2410       n->set_req(1, pair);
2411       n->set_req(2, n->in(3));
2412       n->del_req(3);
2413       break;
2414     }
2415     case Op_StoreVectorScatter: {
2416       Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2417       n->set_req(MemNode::ValueIn, pair);
2418       n->del_req(MemNode::ValueIn+1);
2419       break;
2420     }
2421     case Op_VectorMaskCmp: {
2422       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2423       n->set_req(2, n->in(3));
2424       n->del_req(3);
2425       break;
2426     }
2427     default:
2428       break;
2429   }
2430 }
2431 
2432 #ifndef PRODUCT
2433 void Matcher::record_new2old(Node* newn, Node* old) {
2434   _new2old_map.map(newn->_idx, old);
2435   if (!_reused.test_set(old->_igv_idx)) {
2436     // Reuse the Ideal-level IGV identifier so that the node can be tracked
2437     // across matching. If there are multiple machine nodes expanded from the
2438     // same Ideal node, only one will reuse its IGV identifier.
2439     newn->_igv_idx = old->_igv_idx;
2440   }

 417 //------------------------------Fixup_Save_On_Entry----------------------------
 418 // The stated purpose of this routine is to take care of save-on-entry
 419 // registers.  However, the overall goal of the Match phase is to convert into
 420 // machine-specific instructions which have RegMasks to guide allocation.
 421 // So what this procedure really does is put a valid RegMask on each input
 422 // to the machine-specific variations of all Return, TailCall and Halt
 423 // instructions.  It also adds edges to define the save-on-entry values (and of
 424 // course gives them a mask).
 425 
 426 static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
 427   RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
 428   // Do all the pre-defined register masks
 429   rms[TypeFunc::Control  ] = RegMask::Empty;
 430   rms[TypeFunc::I_O      ] = RegMask::Empty;
 431   rms[TypeFunc::Memory   ] = RegMask::Empty;
 432   rms[TypeFunc::ReturnAdr] = ret_adr;
 433   rms[TypeFunc::FramePtr ] = fp;
 434   return rms;
 435 }
 436 
 437 const int Matcher::scalable_predicate_reg_slots() {
 438   assert(Matcher::has_predicated_vectors() && Matcher::supports_scalable_vector(),
 439         "scalable predicate vector should be supported");
 440   int vector_reg_bit_size = Matcher::scalable_vector_reg_size(T_BYTE) << LogBitsPerByte;
 441   // We assume each predicate register is one-eighth of the size of
 442   // scalable vector register, one mask bit per vector byte.
 443   int predicate_reg_bit_size = vector_reg_bit_size >> 3;
 444   // Compute number of slots which is required when scalable predicate
 445   // register is spilled. E.g. if scalable vector register is 640 bits,
 446   // predicate register is 80 bits, which is 2.5 * slots.
 447   // We will round up the slot number to power of 2, which is required
 448   // by find_first_set().
 449   int slots = predicate_reg_bit_size & (BitsPerInt - 1)
 450               ? (predicate_reg_bit_size >> LogBitsPerInt) + 1
 451               : predicate_reg_bit_size >> LogBitsPerInt;
 452   return round_up_power_of_2(slots);
 453 }
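As a sanity check on the slot arithmetic described in the comments above, here is a small standalone sketch (plain C++ stand-ins for BitsPerInt, LogBitsPerInt and round_up_power_of_2, not HotSpot code) that reproduces the 640-bit example: an 80-bit predicate register needs 2.5 32-bit slots, which rounds up to 3 and then to the next power of two, 4.

#include <cassert>
#include <cstdio>

static const int BitsPerInt    = 32;  // one stack slot holds 32 bits
static const int LogBitsPerInt = 5;

// Stand-in for round_up_power_of_2().
static int round_up_pow2(int v) {
  int p = 1;
  while (p < v) p <<= 1;
  return p;
}

// Mirrors the slot computation in scalable_predicate_reg_slots().
static int predicate_reg_slots(int vector_reg_bit_size) {
  int predicate_reg_bit_size = vector_reg_bit_size >> 3;  // one mask bit per vector byte
  int slots = (predicate_reg_bit_size & (BitsPerInt - 1))
              ? (predicate_reg_bit_size >> LogBitsPerInt) + 1
              : (predicate_reg_bit_size >> LogBitsPerInt);
  return round_up_pow2(slots);
}

int main() {
  assert(predicate_reg_slots(640) == 4);  // 80 bits -> 2.5 slots -> 3 -> 4
  assert(predicate_reg_slots(512) == 2);  // 64 bits -> exactly 2 slots
  printf("640-bit vector: %d predicate spill slots\n", predicate_reg_slots(640));
  return 0;
}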
 454 
 455 #define NOF_STACK_MASKS (3*13)
 456 
 457 // Create the initial stack mask used by values spilling to the stack.
 458 // Disallow any debug info in outgoing argument areas by setting the
 459 // initial mask accordingly.
 460 void Matcher::init_first_stack_mask() {
 461 
 462   // Allocate storage for spill masks as masks for the appropriate load type.
 463   RegMask *rms = (RegMask*)C->comp_arena()->AmallocWords(sizeof(RegMask) * NOF_STACK_MASKS);
 464 
 465   // Initialize empty placeholder masks into the newly allocated arena
 466   for (int i = 0; i < NOF_STACK_MASKS; i++) {
 467     new (rms + i) RegMask();
 468   }
 469 
 470   idealreg2spillmask  [Op_RegN] = &rms[0];
 471   idealreg2spillmask  [Op_RegI] = &rms[1];
 472   idealreg2spillmask  [Op_RegL] = &rms[2];
 473   idealreg2spillmask  [Op_RegF] = &rms[3];
 474   idealreg2spillmask  [Op_RegD] = &rms[4];
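For readers less familiar with the idiom a few lines above: the arena call returns raw, uninitialized memory, and the loop then runs placement new over it to construct each RegMask in place. A minimal standalone illustration of the same pattern (using operator new for the raw buffer instead of C->comp_arena(), and a trivial stand-in type) could look like this:

#include <new>       // placement new
#include <cassert>

struct MiniMask {            // trivial stand-in for RegMask
  unsigned bits;
  MiniMask() : bits(0) {}
};

int main() {
  const int n = 3 * 13;      // NOF_STACK_MASKS in the code above
  // Raw storage, analogous to C->comp_arena()->AmallocWords(...).
  void* raw = ::operator new(sizeof(MiniMask) * n);
  MiniMask* rms = static_cast<MiniMask*>(raw);
  // Construct each object in place, as the loop above does with RegMask.
  for (int i = 0; i < n; i++) {
    new (rms + i) MiniMask();
  }
  assert(rms[0].bits == 0 && rms[n - 1].bits == 0);
  // An arena reclaims its storage wholesale; here we free the buffer manually.
  ::operator delete(raw);
  return 0;
}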

 543   *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
 544 #ifdef _LP64
 545   *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
 546    idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
 547    idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
 548 #else
 549    idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
 550 #endif
 551   *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
 552    idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
 553   *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
 554    idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
 555   *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
 556    idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
 557   *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
 558    idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
 559 
 560   if (Matcher::has_predicated_vectors()) {
 561     *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
 562      idealreg2spillmask[Op_RegVectMask]->OR(aligned_stack_mask);
 563   } else {
 564     *idealreg2spillmask[Op_RegVectMask] = RegMask::Empty;
 565   }
 566 
 567   if (Matcher::vector_size_supported(T_BYTE,4)) {
 568     *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
 569      idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
 570   } else {
 571     *idealreg2spillmask[Op_VecS] = RegMask::Empty;
 572   }
 573 
 574   if (Matcher::vector_size_supported(T_FLOAT,2)) {
 575     // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
 576     // RA guarantees such alignment since it is needed for Double and Long values.
 577     *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
 578      idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
 579   } else {
 580     *idealreg2spillmask[Op_VecD] = RegMask::Empty;
 581   }
 582 
 583   if (Matcher::vector_size_supported(T_FLOAT,4)) {
 584     // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.

 617   }
 618 
 619   if (Matcher::vector_size_supported(T_FLOAT,16)) {
 620     // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
 621     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
 622     for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
 623       aligned_stack_mask.Remove(in);
 624       in = OptoReg::add(in, -1);
 625     }
 626      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
 627      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 628     *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
 629      idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
 630   } else {
 631     *idealreg2spillmask[Op_VecZ] = RegMask::Empty;
 632   }
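The clear_to_sets(RegMask::SlotsPerVecZ) call above keeps only aligned groups of adjacent slots large enough to hold one spilled value, dropping any partial group. A rough sketch of that idea on a plain 32-bit word (not RegMask's actual implementation) is:

#include <cassert>
#include <cstdint>

// Keep only aligned groups of k consecutive set bits; clear any partial group.
static uint32_t clear_to_sets(uint32_t mask, int k) {
  uint32_t out = 0;
  for (int base = 0; base < 32; base += k) {
    uint32_t group = ((k == 32) ? ~0u : ((1u << k) - 1)) << base;
    if ((mask & group) == group) out |= group;  // group fully present: keep it
  }
  return out;
}

int main() {
  // Slots 0..9 allowed; with k = 4 only the two full aligned quadruples survive.
  uint32_t m = (1u << 10) - 1;
  assert(clear_to_sets(m, 4) == 0xFF);  // slots 0..7 remain, 8..9 dropped
  return 0;
}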
 633 
 634   if (Matcher::supports_scalable_vector()) {
 635     int k = 1;
 636     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
 637     // Exclude last input arg stack slots to avoid spilling vector register there,
 638     // otherwise RegVectMask spills could stomp over stack slots in caller frame.
 639     for (; (in >= init_in) && (k < scalable_predicate_reg_slots()); k++) {
 640       scalable_stack_mask.Remove(in);
 641       in = OptoReg::add(in, -1);
 642     }
 643 
 644     // For RegVectMask
 645     scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots());
 646     assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
 647     *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
 648     idealreg2spillmask[Op_RegVectMask]->OR(scalable_stack_mask);
 649 
 650     // Exclude last input arg stack slots to avoid spilling vector register there,
 651     // otherwise vector spills could stomp over stack slots in caller frame.
 652     for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
 653       scalable_stack_mask.Remove(in);
 654       in = OptoReg::add(in, -1);
 655     }
 656 
 657     // For VecA
 658      scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
 659      assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
 660     *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
 661      idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
 662   } else {
 663     *idealreg2spillmask[Op_VecA] = RegMask::Empty;
 664   }
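The two exclusion loops above follow the same pattern: walk down from _in_arg_limit and drop the top reg_size-1 slots of the incoming-argument area, so that a multi-slot spill whose base slot is still allowed cannot reach past the limit and stomp the caller's frame. A standalone sketch of that invariant (plain C++ with made-up slot numbers, no RegMask or OptoReg) is:

#include <cassert>
#include <set>

int main() {
  const int init_in = 4, in_arg_limit = 16;  // hypothetical slot range
  const int N = 4;                           // slots occupied by one spill
  std::set<int> allowed;
  for (int s = init_in; s < in_arg_limit; s++) allowed.insert(s);

  // Same shape as the loops above: remove the last N-1 slots below the limit.
  int in = in_arg_limit - 1;
  for (int k = 1; in >= init_in && k < N; k++, in--) {
    allowed.erase(in);
  }

  // Every base slot that survives leaves room for all N slots of the spill.
  for (int s : allowed) {
    assert(s + N <= in_arg_limit);
  }
  return 0;
}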
 665 
 666   if (UseFPUForSpilling) {
 667     // This mask logic assumes that the spill operations are
 668     // symmetric and that the registers involved are the same size.
 669     // On sparc for instance we may have to use 64 bit moves will

2244     case Op_Jump:
2245       mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
2246       mstack.push(n->in(0), Pre_Visit);     // Visit Control input
2247       return true;                             // while (mstack.is_nonempty())
2248     case Op_StrComp:
2249     case Op_StrEquals:
2250     case Op_StrIndexOf:
2251     case Op_StrIndexOfChar:
2252     case Op_AryEq:
2253     case Op_HasNegatives:
2254     case Op_StrInflatedCopy:
2255     case Op_StrCompressedCopy:
2256     case Op_EncodeISOArray:
2257     case Op_FmaD:
2258     case Op_FmaF:
2259     case Op_FmaVD:
2260     case Op_FmaVF:
2261     case Op_MacroLogicV:
2262     case Op_LoadVectorMasked:
2263     case Op_VectorCmpMasked:
2264     case Op_VectorLoadMask:
2265       set_shared(n); // Force result into register (it will be anyways)
2266       break;
2267     case Op_ConP: {  // Convert pointers above the centerline to NUL
2268       TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2269       const TypePtr* tp = tn->type()->is_ptr();
2270       if (tp->_ptr == TypePtr::AnyNull) {
2271         tn->set_type(TypePtr::NULL_PTR);
2272       }
2273       break;
2274     }
2275     case Op_ConN: {  // Convert narrow pointers above the centerline to NUL
2276       TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2277       const TypePtr* tp = tn->type()->make_ptr();
2278       if (tp && tp->_ptr == TypePtr::AnyNull) {
2279         tn->set_type(TypeNarrowOop::NULL_PTR);
2280       }
2281       break;
2282     }
2283     case Op_Binary:         // These are introduced in the Post_Visit state.
2284       ShouldNotReachHere();

2290     default:
2291       if( n->is_Store() ) {
2292         // Do match stores, despite no ideal reg
2293         mem_op = true;
2294         break;
2295       }
2296       if( n->is_Mem() ) { // Loads and LoadStores
2297         mem_op = true;
2298         // Loads must be root of match tree due to prior load conflict
2299         if( C->subsume_loads() == false )
2300           set_shared(n);
2301       }
2302       // Fall into default case
2303       if( !n->ideal_reg() )
2304         set_dontcare(n);  // Unmatchable Nodes
2305   } // end_switch
2306   return false;
2307 }
2308 
2309 void Matcher::find_shared_post_visit(Node* n, uint opcode) {
2310   if (n->is_predicated_vector()) {
2311     // Restructure into binary trees for Matching.
2312     if (n->req() == 4) {
2313       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2314       n->set_req(2, n->in(3));
2315       n->del_req(3);
2316     } else if (n->req() == 5) {
2317       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2318       n->set_req(2, new BinaryNode(n->in(3), n->in(4)));
2319       n->del_req(4);
2320       n->del_req(3);
2321     }
2322     return;
2323   }
2324 
2325   switch(opcode) {       // Handle some opcodes special
2326     case Op_StorePConditional:
2327     case Op_StoreIConditional:
2328     case Op_StoreLConditional:
2329     case Op_CompareAndExchangeB:
2330     case Op_CompareAndExchangeS:
2331     case Op_CompareAndExchangeI:
2332     case Op_CompareAndExchangeL:
2333     case Op_CompareAndExchangeP:
2334     case Op_CompareAndExchangeN:
2335     case Op_WeakCompareAndSwapB:
2336     case Op_WeakCompareAndSwapS:
2337     case Op_WeakCompareAndSwapI:
2338     case Op_WeakCompareAndSwapL:
2339     case Op_WeakCompareAndSwapP:
2340     case Op_WeakCompareAndSwapN:
2341     case Op_CompareAndSwapB:
2342     case Op_CompareAndSwapS:
2343     case Op_CompareAndSwapI:
2344     case Op_CompareAndSwapL:

2444       n->del_req(4);
2445       n->del_req(3);
2446       break;
2447     }
2448     case Op_CopySignD:
2449     case Op_SignumF:
2450     case Op_SignumD: {
2451       Node* pair = new BinaryNode(n->in(2), n->in(3));
2452       n->set_req(2, pair);
2453       n->del_req(3);
2454       break;
2455     }
2456     case Op_VectorBlend:
2457     case Op_VectorInsert: {
2458       Node* pair = new BinaryNode(n->in(1), n->in(2));
2459       n->set_req(1, pair);
2460       n->set_req(2, n->in(3));
2461       n->del_req(3);
2462       break;
2463     }
2464     case Op_LoadVectorGatherMasked:
2465     case Op_StoreVectorScatter: {
2466       Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2467       n->set_req(MemNode::ValueIn, pair);
2468       n->del_req(MemNode::ValueIn+1);
2469       break;
2470     }
2471     case Op_StoreVectorScatterMasked: {
2472       Node* pair = new BinaryNode(n->in(MemNode::ValueIn+1), n->in(MemNode::ValueIn+2));
2473       n->set_req(MemNode::ValueIn+1, pair);
2474       n->del_req(MemNode::ValueIn+2);
2475       pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2476       n->set_req(MemNode::ValueIn, pair);
2477       n->del_req(MemNode::ValueIn+1);
2478       break;
2479     }
2480     case Op_VectorMaskCmp: {
2481       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2482       n->set_req(2, n->in(3));
2483       n->del_req(3);
2484       break;
2485     }
2486     default:
2487       break;
2488   }
2489 }
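To make the restructuring above concrete: find_shared_post_visit packs extra operands into Binary nodes so the operand trees handed to the matcher are binary, as the "Restructure into binary trees for Matching" comment at the top of the function says. Below is a minimal sketch of the Op_VectorBlend/Op_VectorInsert arm using a hypothetical MiniNode stand-in (not HotSpot's Node/BinaryNode classes):

#include <cassert>
#include <vector>

// Hypothetical stand-in for an ideal node: in[0] is control, value inputs start at 1.
struct MiniNode {
  const char* name;
  std::vector<MiniNode*> in;
  explicit MiniNode(const char* n) : name(n), in(1, nullptr) {}
};

// Mirrors the Op_VectorBlend / Op_VectorInsert arm of the switch above:
// inputs (v1, v2, mask) become (Binary(v1, v2), mask).
static void repack_blend(MiniNode* n, MiniNode* binary) {
  binary->in.push_back(n->in[1]);   // new BinaryNode(n->in(1), n->in(2))
  binary->in.push_back(n->in[2]);
  n->in[1] = binary;                // set_req(1, pair)
  n->in[2] = n->in[3];              // set_req(2, n->in(3))
  n->in.pop_back();                 // del_req(3)
}

int main() {
  MiniNode v1("v1"), v2("v2"), mask("mask"), pair("Binary");
  MiniNode blend("VectorBlend");
  blend.in.push_back(&v1);
  blend.in.push_back(&v2);
  blend.in.push_back(&mask);

  repack_blend(&blend, &pair);

  assert(blend.in.size() == 3);                      // control + two value inputs
  assert(blend.in[1] == &pair && blend.in[2] == &mask);
  assert(pair.in[1] == &v1 && pair.in[2] == &v2);
  return 0;
}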
2490 
2491 #ifndef PRODUCT
2492 void Matcher::record_new2old(Node* newn, Node* old) {
2493   _new2old_map.map(newn->_idx, old);
2494   if (!_reused.test_set(old->_igv_idx)) {
2495     // Reuse the Ideal-level IGV identifier so that the node can be tracked
2496     // across matching. If there are multiple machine nodes expanded from the
2497     // same Ideal node, only one will reuse its IGV identifier.
2498     newn->_igv_idx = old->_igv_idx;
2499   }