/*
 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciSymbols.hpp"
#include "classfile/vmSymbols.hpp"
#include "opto/library_call.hpp"
#include "opto/runtime.hpp"
#include "opto/vectornode.hpp"
#include "prims/vectorSupport.hpp"
#include "runtime/stubRoutines.hpp"

#ifdef ASSERT
static bool is_vector(ciKlass* klass) {
  return klass->is_subclass_of(ciEnv::current()->vector_VectorPayload_klass());
}

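// Debug-only helper: verify that the exact vector box klass exposes the static ETYPE and
// VLENGTH constants describing its element type and vector length.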
static bool check_vbox(const TypeInstPtr* vbox_type) {
  assert(vbox_type->klass_is_exact(), "");

  ciInstanceKlass* ik = vbox_type->instance_klass();
  assert(is_vector(ik), "not a vector");

  ciField* fd1 = ik->get_field_by_name(ciSymbols::ETYPE_name(), ciSymbols::class_signature(), /* is_static */ true);
  assert(fd1 != nullptr, "element type info is missing");

  ciConstant val1 = fd1->constant_value();
  BasicType elem_bt = val1.as_object()->as_instance()->java_mirror_type()->basic_type();
  assert(is_java_primitive(elem_bt), "element type info is missing");

  ciField* fd2 = ik->get_field_by_name(ciSymbols::VLENGTH_name(), ciSymbols::int_signature(), /* is_static */ true);
  assert(fd2 != nullptr, "vector length info is missing");

  ciConstant val2 = fd2->constant_value();
  assert(val2.as_int() > 0, "vector length info is missing");

  return true;
}
#endif

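// Print an intrinsification diagnostic when intrinsic printing is enabled for this
// compilation; the non-product variant compiles away in product builds.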
#define log_if_needed(...)        \
  if (C->print_intrinsics()) {    \
    tty->print_cr(__VA_ARGS__);   \
  }

#ifndef PRODUCT
#define non_product_log_if_needed(...) log_if_needed(__VA_ARGS__)
#else
#define non_product_log_if_needed(...)
#endif

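// Returns true if the klass represents a vector mask (a subclass of the VM's VectorMask
// payload class).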
static bool is_vector_mask(ciKlass* klass) {
  return klass->is_subclass_of(ciEnv::current()->vector_VectorMask_klass());
}

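// Check whether the platform supports a vector rotate with the given opcode, element type
// and vector length, either directly or by composing it from vector shifts and OrV.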
bool LibraryCallKit::arch_supports_vector_rotate(int opc, int num_elem, BasicType elem_bt,
                                                 VectorMaskUseType mask_use_type, bool has_scalar_args) {
  bool is_supported = true;

  // has_scalar_args flag is true only for a non-constant scalar shift count,
  // since in this case the shift needs to be broadcast.
  if (!Matcher::match_rule_supported_vector(opc, num_elem, elem_bt) ||
       (has_scalar_args && !arch_supports_vector(Op_Replicate, num_elem, elem_bt, VecMaskNotUsed))) {
    is_supported = false;
  }

  if (is_supported) {
    // Check whether mask unboxing is supported. This is a two-step process: the contents
    // of a boolean array are first loaded into a vector, followed by either lane expansion
    // to match the lane size of the masked vector operation or population of the predicate
    // register.
    if ((mask_use_type & VecMaskUseLoad) != 0) {
      if (!Matcher::match_rule_supported_vector(Op_VectorLoadMask, num_elem, elem_bt) ||
          !Matcher::match_rule_supported_vector(Op_LoadVector, num_elem, T_BOOLEAN)) {
        non_product_log_if_needed("  ** Rejected vector mask loading (%s,%s,%d) because architecture does not support it",
                                  NodeClassNames[Op_VectorLoadMask], type2name(elem_bt), num_elem);
        return false;
      }
    }

    if ((mask_use_type & VecMaskUsePred) != 0) {
      if (!Matcher::has_predicated_vectors() ||
          !Matcher::match_rule_supported_vector_masked(opc, num_elem, elem_bt)) {
        non_product_log_if_needed("  ** Rejected vector mask predicate use (%s,%s,%d) because architecture does not support it",
                                  NodeClassNames[opc], type2name(elem_bt), num_elem);
        return false;
      }
    }
  }

  int lshiftopc, rshiftopc;
  switch (elem_bt) {
    case T_BYTE:
      lshiftopc = Op_LShiftI;
      rshiftopc = Op_URShiftB;
      break;
    case T_SHORT:
      lshiftopc = Op_LShiftI;
      rshiftopc = Op_URShiftS;
      break;
    case T_INT:
      lshiftopc = Op_LShiftI;
      rshiftopc = Op_URShiftI;
      break;
    case T_LONG:
      lshiftopc = Op_LShiftL;
      rshiftopc = Op_URShiftL;
      break;
    default: fatal("Unexpected type: %s", type2name(elem_bt));
  }
  int lshiftvopc = VectorNode::opcode(lshiftopc, elem_bt);
  int rshiftvopc = VectorNode::opcode(rshiftopc, elem_bt);
  if (!is_supported &&
      arch_supports_vector(lshiftvopc, num_elem, elem_bt, VecMaskNotUsed, has_scalar_args) &&
      arch_supports_vector(rshiftvopc, num_elem, elem_bt, VecMaskNotUsed, has_scalar_args) &&
      arch_supports_vector(Op_OrV, num_elem, elem_bt, VecMaskNotUsed)) {
    is_supported = true;
  }
  return is_supported;
}

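// Allocate a vector box object of the given exact type and wrap the vector value in a
// VectorBox node so that object type information is preserved.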
Node* GraphKit::box_vector(Node* vector, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception) {
  assert(EnableVectorSupport, "");

  PreserveReexecuteState preexecs(this);
  jvms()->set_should_reexecute(true);

  VectorBoxAllocateNode* alloc = new VectorBoxAllocateNode(C, vbox_type);
  set_edges_for_java_call(alloc, /*must_throw=*/false, /*separate_io_proj=*/true);
  make_slow_call_ex(alloc, env()->Throwable_klass(), /*separate_io_proj=*/true, deoptimize_on_exception);
  set_i_o(gvn().transform( new ProjNode(alloc, TypeFunc::I_O) ));
  set_all_memory(gvn().transform( new ProjNode(alloc, TypeFunc::Memory) ));
  Node* ret = gvn().transform(new ProjNode(alloc, TypeFunc::Parms));

  assert(check_vbox(vbox_type), "");
  const TypeVect* vt = TypeVect::make(elem_bt, num_elem, is_vector_mask(vbox_type->instance_klass()));
  VectorBoxNode* vbox = new VectorBoxNode(C, ret, vector, vbox_type, vt);
  return gvn().transform(vbox);
}

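// Extract the vector payload from a vector box object. Returns nullptr if the box type
// does not match the expected vector shape or if the value may be null.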
Node* GraphKit::unbox_vector(Node* v, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem) {
  assert(EnableVectorSupport, "");
  const TypeInstPtr* vbox_type_v = gvn().type(v)->isa_instptr();
  if (vbox_type_v == nullptr || vbox_type->instance_klass() != vbox_type_v->instance_klass()) {
    return nullptr; // arguments don't agree on vector shapes
  }
  if (vbox_type_v->maybe_null()) {
    return nullptr; // no nulls are allowed
  }
  assert(check_vbox(vbox_type), "");
  const TypeVect* vt = TypeVect::make(elem_bt, num_elem, is_vector_mask(vbox_type->instance_klass()));
  Node* unbox = gvn().transform(new VectorUnboxNode(C, vt, v, merged_memory()));
  return unbox;
}

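// Mask the scalar shift count to the element width in bits minus one and turn it into a
// vector shift-count node.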
Node* GraphKit::vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem) {
  assert(bt == T_INT || bt == T_LONG || bt == T_SHORT || bt == T_BYTE, "byte, short, long and int are supported");
  juint mask = (type2aelembytes(bt) * BitsPerByte - 1);
  Node* nmask = gvn().transform(ConNode::make(TypeInt::make(mask)));
  Node* mcnt = gvn().transform(new AndINode(cnt, nmask));
  return gvn().transform(VectorNode::shift_count(shift_op, mcnt, num_elem, bt));
}

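// Central check that the backend supports the given vector operation for this element type
// and vector length, including any mask loading, storing or predicate use indicated by
// mask_use_type.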
bool LibraryCallKit::arch_supports_vector(int sopc, int num_elem, BasicType type, VectorMaskUseType mask_use_type, bool has_scalar_args) {
  // Check that the operation is valid.
  if (sopc <= 0) {
    non_product_log_if_needed("  ** Rejected intrinsification because no valid vector op could be extracted");
    return false;
  }

  if (VectorNode::is_vector_rotate(sopc)) {
    if (!arch_supports_vector_rotate(sopc, num_elem, type, mask_use_type, has_scalar_args)) {
      non_product_log_if_needed("  ** Rejected vector op (%s,%s,%d) because architecture does not support variable vector shifts",
                                NodeClassNames[sopc], type2name(type), num_elem);
      return false;
    }
  } else if (VectorNode::is_vector_integral_negate(sopc)) {
    if (!VectorNode::is_vector_integral_negate_supported(sopc, num_elem, type, false)) {
      non_product_log_if_needed("  ** Rejected vector op (%s,%s,%d) because architecture does not support integral vector negate",
                                NodeClassNames[sopc], type2name(type), num_elem);
      return false;
    }
  } else {
    // Check that architecture supports this op-size-type combination.
    if (!Matcher::match_rule_supported_vector(sopc, num_elem, type)) {
      non_product_log_if_needed("  ** Rejected vector op (%s,%s,%d) because architecture does not support it",
                                NodeClassNames[sopc], type2name(type), num_elem);
      return false;
    } else {
      assert(Matcher::match_rule_supported(sopc), "must be supported");
    }
  }

  if (num_elem == 1) {
    if (mask_use_type != VecMaskNotUsed) {
      non_product_log_if_needed("  ** Rejected vector mask op (%s,%s,%d) because architecture does not support it",
                                NodeClassNames[sopc], type2name(type), num_elem);
      return false;
    }

    if (sopc != 0) {
      if (sopc != Op_LoadVector && sopc != Op_StoreVector) {
        non_product_log_if_needed("  ** Not an SVML call or load/store vector op (%s,%s,%d)",
                                  NodeClassNames[sopc], type2name(type), num_elem);
        return false;
      }
    }
  }

  if (!has_scalar_args && VectorNode::is_vector_shift(sopc) &&
      !Matcher::supports_vector_variable_shifts()) {
    log_if_needed("  ** Rejected vector op (%s,%s,%d) because architecture does not support variable vector shifts",
                  NodeClassNames[sopc], type2name(type), num_elem);
    return false;
  }

  // Check whether mask unboxing is supported. This is a two-step process: the contents of
  // a boolean array are first loaded into a vector, followed by either lane expansion to
  // match the lane size of the masked vector operation or population of the predicate
  // register.
  if ((mask_use_type & VecMaskUseLoad) != 0) {
    if (!Matcher::match_rule_supported_vector(Op_VectorLoadMask, num_elem, type) ||
        !Matcher::match_rule_supported_vector(Op_LoadVector, num_elem, T_BOOLEAN)) {
      non_product_log_if_needed("  ** Rejected vector mask loading (%s,%s,%d) because architecture does not support it",
                                NodeClassNames[Op_VectorLoadMask], type2name(type), num_elem);
      return false;
    }
  }

  // Check whether mask boxing is supported. This is a two-step process: the contents of the
  // mask vector / predicate register are first stored into a boolean vector, followed by a
  // vector store that transfers the contents to the underlying storage of the mask box,
  // which is a boolean array.
  if ((mask_use_type & VecMaskUseStore) != 0) {
    if (!Matcher::match_rule_supported_vector(Op_VectorStoreMask, num_elem, type) ||
        !Matcher::match_rule_supported_vector(Op_StoreVector, num_elem, T_BOOLEAN)) {
      non_product_log_if_needed("  ** Rejected vector mask storing (%s,%s,%d) because architecture does not support it",
                                NodeClassNames[Op_VectorStoreMask], type2name(type), num_elem);
      return false;
    }
  }

  if ((mask_use_type & VecMaskUsePred) != 0) {
    bool is_supported = false;
    if (Matcher::has_predicated_vectors()) {
      if (VectorNode::is_vector_integral_negate(sopc)) {
        is_supported = VectorNode::is_vector_integral_negate_supported(sopc, num_elem, type, true);
      } else {
        is_supported = Matcher::match_rule_supported_vector_masked(sopc, num_elem, type);
      }
    }
    is_supported |= Matcher::supports_vector_predicate_op_emulation(sopc, num_elem, type);

    if (!is_supported) {
      non_product_log_if_needed("  ** Rejected vector mask predicate use (%s,%s,%d) because architecture does not support it",
                                NodeClassNames[sopc], type2name(type), num_elem);
      return false;
    }
  }

  return true;
}

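// Returns true if the constant Class argument denotes an instance klass that has already
// been initialized.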
static bool is_klass_initialized(const TypeInstPtr* vec_klass) {
  if (vec_klass->const_oop() == nullptr) {
    return false; // uninitialized or some kind of unsafe access
  }
  assert(vec_klass->const_oop()->as_instance()->java_lang_Class_klass() != nullptr, "klass instance expected");
  ciInstanceKlass* klass = vec_klass->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
  return klass->is_initialized();
}

// public static
// <V extends Vector<E>,
//  M extends VectorMask<E>,
//  E>
// V unaryOp(int oprId, Class<? extends V> vmClass, Class<? extends M> maskClass, Class<E> elementType,
//           int length, V v, M m,
//           UnaryOperation<V, M> defaultImpl)
//
// public static
// <V,
//  M extends VectorMask<E>,
//  E>
// V binaryOp(int oprId, Class<? extends V> vmClass, Class<? extends M> maskClass, Class<E> elementType,
//            int length, V v1, V v2, M m,
//            BinaryOperation<V, M> defaultImpl)
//
// public static
// <V extends Vector<E>,
//  M extends VectorMask<E>,
//  E>
// V ternaryOp(int oprId, Class<? extends V> vmClass, Class<? extends M> maskClass, Class<E> elementType,
//             int length, V v1, V v2, V v3, M m,
//             TernaryOperation<V, M> defaultImpl)
//
bool LibraryCallKit::inline_vector_nary_operation(int n) {
  const TypeInt*     opr          = gvn().type(argument(0))->isa_int();
  const TypeInstPtr* vector_klass = gvn().type(argument(1))->isa_instptr();
  const TypeInstPtr* mask_klass   = gvn().type(argument(2))->isa_instptr();
  const TypeInstPtr* elem_klass   = gvn().type(argument(3))->isa_instptr();
  const TypeInt*     vlen         = gvn().type(argument(4))->isa_int();

  if (opr          == nullptr || !opr->is_con() ||
      vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
      elem_klass   == nullptr || elem_klass->const_oop()   == nullptr ||
      vlen         == nullptr || !vlen->is_con()) {
    log_if_needed("  ** missing constant: opr=%s vclass=%s etype=%s vlen=%s",
                    NodeClassNames[argument(0)->Opcode()],
                    NodeClassNames[argument(1)->Opcode()],
                    NodeClassNames[argument(3)->Opcode()],
                    NodeClassNames[argument(4)->Opcode()]);
    return false; // not enough info for intrinsification
  }

  ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
  if (!elem_type->is_primitive_type()) {
    log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
    return false; // should be primitive type
  }
  if (!is_klass_initialized(vector_klass)) {
    log_if_needed("  ** klass argument not initialized");
    return false;
  }

  // "argument(n + 5)" should be the mask object. We assume it is "null" when no mask
  // is used to control this operation.
  const Type* vmask_type = gvn().type(argument(n + 5));
  bool is_masked_op = vmask_type != TypePtr::NULL_PTR;
  if (is_masked_op) {
    if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) {
      log_if_needed("  ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]);
      return false; // not enough info for intrinsification
    }

    if (!is_klass_initialized(mask_klass)) {
      log_if_needed("  ** mask klass argument not initialized");
      return false;
    }

    if (vmask_type->maybe_null()) {
      log_if_needed("  ** null mask values are not allowed for masked op");
      return false;
    }
  }

  BasicType elem_bt = elem_type->basic_type();
  bool has_scalar_op = VectorSupport::has_scalar_op(opr->get_con());
  bool is_unsigned = VectorSupport::is_unsigned_op(opr->get_con());

  int num_elem = vlen->get_con();
  int opc = VectorSupport::vop2ideal(opr->get_con(), elem_bt);
  int sopc = has_scalar_op ? VectorNode::opcode(opc, elem_bt) : opc;
  if (sopc == 0 || num_elem == 1) {
    log_if_needed("  ** operation not supported: arity=%d opc=%s[%d] vlen=%d etype=%s",
                    n, NodeClassNames[opc], opc, num_elem, type2name(elem_bt));
    return false; // operation not supported
  }
  ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
  const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);

  if (is_vector_mask(vbox_klass)) {
    assert(!is_masked_op, "mask operations do not need mask to control");
  }

  // When a mask is used, the mask use type needs to be VecMaskUseLoad.
  VectorMaskUseType mask_use_type = is_vector_mask(vbox_klass) ? VecMaskUseAll
                                      : is_masked_op ? VecMaskUseLoad : VecMaskNotUsed;
  if ((sopc != 0) && !arch_supports_vector(sopc, num_elem, elem_bt, mask_use_type)) {
    log_if_needed("  ** not supported: arity=%d opc=%d vlen=%d etype=%s ismask=%d is_masked_op=%d",
                    n, sopc, num_elem, type2name(elem_bt),
                    is_vector_mask(vbox_klass) ? 1 : 0, is_masked_op ? 1 : 0);
    return false; // not supported
  }

  // use_predicate is true if the current platform implements the masked operation with the predicate feature.
  bool use_predicate = is_masked_op && sopc != 0 && arch_supports_vector(sopc, num_elem, elem_bt, VecMaskUsePred);
  if (is_masked_op && !use_predicate && !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad)) {
    log_if_needed("  ** not supported: arity=%d opc=%d vlen=%d etype=%s ismask=0 is_masked_op=1",
                    n, sopc, num_elem, type2name(elem_bt));
    return false;
  }

  Node* opd1 = nullptr; Node* opd2 = nullptr; Node* opd3 = nullptr;
  switch (n) {
    case 3: {
      opd3 = unbox_vector(argument(7), vbox_type, elem_bt, num_elem);
      if (opd3 == nullptr) {
        log_if_needed("  ** unbox failed v3=%s",
                        NodeClassNames[argument(7)->Opcode()]);
        return false;
      }
      // fall-through
    }
    case 2: {
      opd2 = unbox_vector(argument(6), vbox_type, elem_bt, num_elem);
      if (opd2 == nullptr) {
        log_if_needed("  ** unbox failed v2=%s",
                        NodeClassNames[argument(6)->Opcode()]);
        return false;
      }
      // fall-through
    }
    case 1: {
      opd1 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem);
      if (opd1 == nullptr) {
        log_if_needed("  ** unbox failed v1=%s",
                        NodeClassNames[argument(5)->Opcode()]);
        return false;
      }
      break;
    }
    default: fatal("unsupported arity: %d", n);
  }

  Node* mask = nullptr;
  if (is_masked_op) {
    ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
    assert(is_vector_mask(mbox_klass), "argument(2) should be a mask class");
    const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
    mask = unbox_vector(argument(n + 5), mbox_type, elem_bt, num_elem);
    if (mask == nullptr) {
      log_if_needed("  ** unbox failed mask=%s",
                      NodeClassNames[argument(n + 5)->Opcode()]);
      return false;
    }
  }

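  // Build the vector IR node: unary and binary operations share one constructor, while
  // ternary operations take three vector inputs.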
  Node* operation = nullptr;
  const TypeVect* vt = TypeVect::make(elem_bt, num_elem, is_vector_mask(vbox_klass));
  switch (n) {
    case 1:
    case 2: {
      operation = VectorNode::make(sopc, opd1, opd2, vt, is_vector_mask(vbox_klass), VectorNode::is_shift_opcode(opc), is_unsigned);
      break;
    }
    case 3: {
      operation = VectorNode::make(sopc, opd1, opd2, opd3, vt);
      break;
    }
    default: fatal("unsupported arity: %d", n);
  }

  if (is_masked_op && mask != nullptr) {
    if (use_predicate) {
      operation->add_req(mask);
      operation->add_flag(Node::Flag_is_predicated_vector);
    } else {
      operation->add_flag(Node::Flag_is_predicated_using_blend);
      operation = gvn().transform(operation);
      operation = new VectorBlendNode(opd1, operation, mask);
    }
  }
  operation = gvn().transform(operation);

  // Wrap it up in VectorBox to keep object type information.
  Node* vbox = box_vector(operation, vbox_type, elem_bt, num_elem);
  set_result(vbox);
  C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
  return true;
}

// public static
// <V extends Vector<E>, E>
// V libraryUnaryOp(long address, Class<? extends V> vClass, Class<E> elementType, int length, String debugName,
//                  V v,
//                  UnaryOperation<V, ?> defaultImpl)
//
// public static
// <V extends VectorPayload, E>
// V libraryBinaryOp(long address, Class<? extends V> vClass, Class<E> elementType, int length, String debugName,
//                   V v1, V v2,
//                   BinaryOperation<V, ?> defaultImpl)
bool LibraryCallKit::inline_vector_call(int arity) {
  assert(Matcher::supports_vector_calling_convention(), "required");

  const TypeLong*    entry          = gvn().type(argument(0))->isa_long();
  const TypeInstPtr* vector_klass   = gvn().type(argument(2))->isa_instptr();
  const TypeInstPtr* elem_klass     = gvn().type(argument(3))->isa_instptr();
  const TypeInt*     vlen           = gvn().type(argument(4))->isa_int();
  const TypeInstPtr* debug_name_oop = gvn().type(argument(5))->isa_instptr();

  if (entry        == nullptr   || !entry->is_con() ||
      vector_klass == nullptr   || vector_klass->const_oop() == nullptr ||
      elem_klass   == nullptr   || elem_klass->const_oop() == nullptr ||
      vlen         == nullptr   || !vlen->is_con() ||
      debug_name_oop == nullptr || debug_name_oop->const_oop() == nullptr) {
    log_if_needed("  ** missing constant: opr=%s vclass=%s etype=%s vlen=%s debug_name=%s",
                  NodeClassNames[argument(0)->Opcode()],
                  NodeClassNames[argument(2)->Opcode()],
                  NodeClassNames[argument(3)->Opcode()],
                  NodeClassNames[argument(4)->Opcode()],
                  NodeClassNames[argument(5)->Opcode()]);
    return false; // not enough info for intrinsification
  }

  if (entry->get_con() == 0) {
    log_if_needed("  ** missing entry point");
    return false;
  }

  ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
  if (!elem_type->is_primitive_type()) {
    log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
    return false; // should be primitive type
  }
  if (!is_klass_initialized(vector_klass)) {
    log_if_needed("  ** klass argument not initialized");
    return false;
  }

  BasicType elem_bt = elem_type->basic_type();
  int num_elem = vlen->get_con();
  if (!Matcher::vector_size_supported(elem_bt, num_elem)) {
    log_if_needed("  ** vector size (vlen=%d, etype=%s) is not supported",
                  num_elem, type2name(elem_bt));
    return false;
  }

  ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
  const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);

  Node* opd1 = unbox_vector(argument(6), vbox_type, elem_bt, num_elem);
  if (opd1 == nullptr) {
    log_if_needed("  ** unbox failed v1=%s", NodeClassNames[argument(6)->Opcode()]);
    return false;
  }

  Node* opd2 = nullptr;
  if (arity > 1) {
    opd2 = unbox_vector(argument(7), vbox_type, elem_bt, num_elem);
    if (opd2 == nullptr) {
      log_if_needed("  ** unbox failed v2=%s", NodeClassNames[argument(7)->Opcode()]);
      return false;
    }
  }
  assert(arity == 1 || arity == 2, "arity %d not supported", arity);
  const TypeVect* vt = TypeVect::make(elem_bt, num_elem);
  const TypeFunc* call_type = OptoRuntime::Math_Vector_Vector_Type(arity, vt, vt);
  address entry_addr = (address)entry->get_con();

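  // Resolve a printable debug name for the runtime call from the String argument, if one
  // was supplied.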
  const char* debug_name = "<unknown>";
  if (!debug_name_oop->const_oop()->is_null_object()) {
    size_t buflen = 100;
    char* buf = NEW_ARENA_ARRAY(C->comp_arena(), char, buflen);
    debug_name = debug_name_oop->const_oop()->as_instance()->java_lang_String_str(buf, buflen);
  }
  Node* vcall = make_runtime_call(RC_VECTOR,
                                  call_type,
                                  entry_addr,
                                  debug_name,
                                  TypePtr::BOTTOM,
                                  opd1,
                                  opd2);

  vcall = gvn().transform(new ProjNode(gvn().transform(vcall), TypeFunc::Parms));

  // Wrap it up in VectorBox to keep object type information.
  Node* vbox = box_vector(vcall, vbox_type, elem_bt, num_elem);
  set_result(vbox);
  C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
  return true;
}

// <E, M>
// long maskReductionCoerced(int oper, Class<? extends M> maskClass, Class<?> elemClass,
//                          int length, M m, VectorMaskOp<M> defaultImpl)
bool LibraryCallKit::inline_vector_mask_operation() {
  const TypeInt*     oper       = gvn().type(argument(0))->isa_int();
  const TypeInstPtr* mask_klass = gvn().type(argument(1))->isa_instptr();
  const TypeInstPtr* elem_klass = gvn().type(argument(2))->isa_instptr();
  const TypeInt*     vlen       = gvn().type(argument(3))->isa_int();
  Node*              mask       = argument(4);

  if (mask_klass == nullptr || mask_klass->const_oop() == nullptr ||
      elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
      vlen       == nullptr || !vlen->is_con() ||
      oper       == nullptr || !oper->is_con() ||
      mask->is_top()) {
    return false; // dead code
  }

  if (!is_klass_initialized(mask_klass)) {
    log_if_needed("  ** klass argument not initialized");
    return false;
  }

  int num_elem = vlen->get_con();
  ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
  BasicType elem_bt = elem_type->basic_type();

  int mopc = VectorSupport::vop2ideal(oper->get_con(), elem_bt);
  if (!arch_supports_vector(mopc, num_elem, elem_bt, VecMaskUseLoad)) {
    log_if_needed("  ** not supported: arity=1 op=cast#%d/3 vlen2=%d etype2=%s",
                    mopc, num_elem, type2name(elem_bt));
    return false; // not supported
  }

  ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
  const TypeInstPtr* mask_box_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
  Node* mask_vec = unbox_vector(mask, mask_box_type, elem_bt, num_elem);
  if (mask_vec == nullptr) {
    log_if_needed("  ** unbox failed mask=%s",
                      NodeClassNames[argument(4)->Opcode()]);
    return false;
  }

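  // If the mask is not held in a predicate register, convert it to its boolean-vector
  // representation before applying the mask reduction.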
  if (mask_vec->bottom_type()->isa_vectmask() == nullptr) {
    mask_vec = gvn().transform(VectorStoreMaskNode::make(gvn(), mask_vec, elem_bt, num_elem));
  }
  const Type* maskoper_ty = mopc == Op_VectorMaskToLong ? (const Type*)TypeLong::LONG : (const Type*)TypeInt::INT;
  Node* maskoper = gvn().transform(VectorMaskOpNode::make(mask_vec, maskoper_ty, mopc));
  if (mopc != Op_VectorMaskToLong) {
    maskoper = ConvI2L(maskoper);
  }
  set_result(maskoper);

  C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
  return true;
}

// public static
// <M,
//  S extends VectorSpecies<E>,
//  E>
// M fromBitsCoerced(Class<? extends M> vmClass, Class<E> elementType, int length,
//                    long bits, int mode, S s,
//                    BroadcastOperation<M, E, S> defaultImpl)
bool LibraryCallKit::inline_vector_frombits_coerced() {
  const TypeInstPtr* vector_klass = gvn().type(argument(0))->isa_instptr();
  const TypeInstPtr* elem_klass   = gvn().type(argument(1))->isa_instptr();
  const TypeInt*     vlen         = gvn().type(argument(2))->isa_int();
  const TypeLong*    bits_type    = gvn().type(argument(3))->isa_long();
  // The mode argument determines the kind of operation performed. It can take one of the
  // following values:
  //   MODE_BROADCAST for the Vector.broadcast and VectorMask.maskAll operations.
  //   MODE_BITS_COERCED_LONG_TO_MASK for the VectorMask.fromLong operation.
  const TypeInt*     mode         = gvn().type(argument(5))->isa_int();

  if (vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
      elem_klass   == nullptr || elem_klass->const_oop()   == nullptr ||
      vlen         == nullptr || !vlen->is_con() ||
      bits_type    == nullptr ||
      mode         == nullptr || !mode->is_con()) {
    log_if_needed("  ** missing constant: vclass=%s etype=%s vlen=%s bitwise=%s",
                    NodeClassNames[argument(0)->Opcode()],
                    NodeClassNames[argument(1)->Opcode()],
                    NodeClassNames[argument(2)->Opcode()],
                    NodeClassNames[argument(5)->Opcode()]);
    return false; // not enough info for intrinsification
  }

  if (!is_klass_initialized(vector_klass)) {
    log_if_needed("  ** klass argument not initialized");
    return false;
  }
  ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
  if (!elem_type->is_primitive_type()) {
    log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
    return false; // should be primitive type
  }
  BasicType elem_bt = elem_type->basic_type();
  int num_elem = vlen->get_con();
  ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
  const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);

  bool is_mask = is_vector_mask(vbox_klass);
  int  bcast_mode = mode->get_con();
  VectorMaskUseType checkFlags = (VectorMaskUseType)(is_mask ? VecMaskUseAll : VecMaskNotUsed);
  int opc = bcast_mode == VectorSupport::MODE_BITS_COERCED_LONG_TO_MASK ? Op_VectorLongToMask : Op_Replicate;

  if (!arch_supports_vector(opc, num_elem, elem_bt, checkFlags, true /*has_scalar_args*/)) {
    log_if_needed("  ** not supported: arity=0 op=broadcast vlen=%d etype=%s ismask=%d bcast_mode=%d",
                    num_elem, type2name(elem_bt),
                    is_mask ? 1 : 0,
                    bcast_mode);
    return false; // not supported
  }

  Node* broadcast = nullptr;
  Node* bits = argument(3);
  Node* elem = bits;

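  // For MODE_BITS_COERCED_LONG_TO_MASK, materialize the mask directly from the long bit
  // pattern; otherwise reinterpret the long bits as a scalar of the element type and
  // broadcast it into every lane.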
  if (opc == Op_VectorLongToMask) {
    const TypeVect* vt = TypeVect::makemask(elem_bt, num_elem);
    if (vt->isa_vectmask()) {
      broadcast = gvn().transform(new VectorLongToMaskNode(elem, vt));
    } else {
      const TypeVect* mvt = TypeVect::make(T_BOOLEAN, num_elem);
      broadcast = gvn().transform(new VectorLongToMaskNode(elem, mvt));
      broadcast = gvn().transform(new VectorLoadMaskNode(broadcast, vt));
    }
  } else {
    switch (elem_bt) {
      case T_BOOLEAN: // fall-through
      case T_BYTE:    // fall-through
      case T_SHORT:   // fall-through
      case T_CHAR:    // fall-through
      case T_INT: {
        elem = gvn().transform(new ConvL2INode(bits));
        break;
      }
      case T_DOUBLE: {
        elem = gvn().transform(new MoveL2DNode(bits));
        break;
      }
      case T_FLOAT: {
        bits = gvn().transform(new ConvL2INode(bits));
        elem = gvn().transform(new MoveI2FNode(bits));
        break;
      }
      case T_LONG: {
        // no conversion needed
        break;
      }
      default: fatal("%s", type2name(elem_bt));
    }
    broadcast = VectorNode::scalar2vector(elem, num_elem, elem_bt, is_mask);
    broadcast = gvn().transform(broadcast);
  }

  Node* box = box_vector(broadcast, vbox_type, elem_bt, num_elem);
  set_result(box);
  C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
  return true;
}

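// A vector with element type elem_bt may be loaded from or stored to an array of type
// arr_type if the element types match, modulo the short/char and byte/boolean aliases,
// or if a mismatched memory segment access is being performed.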
static bool elem_consistent_with_arr(BasicType elem_bt, const TypeAryPtr* arr_type, bool mismatched_ms) {
  assert(arr_type != nullptr, "unexpected");
  BasicType arr_elem_bt = arr_type->elem()->array_element_basic_type();
  if (elem_bt == arr_elem_bt) {
    return true;
  } else if (elem_bt == T_SHORT && arr_elem_bt == T_CHAR) {
    // Load/store of short vector from/to char[] is supported
    return true;
  } else if (elem_bt == T_BYTE && arr_elem_bt == T_BOOLEAN) {
    // Load/store of byte vector from/to boolean[] is supported
    return true;
  } else {
    return mismatched_ms;
  }
}

//  public static
//  <C,
//   VM extends VectorPayload,
//   E,
//   S extends VectorSpecies<E>>
//  VM load(Class<? extends VM> vmClass, Class<E> eClass,
//          int length,
//          Object base, long offset,            // Unsafe addressing
//          boolean fromSegment,
//          C container, long index, S s,        // Arguments for default implementation
//          LoadOperation<C, VM, S> defaultImpl) {
//  public static
//  <C,
//   V extends VectorPayload>
//  void store(Class<?> vClass, Class<?> eClass,
//             int length,
//             Object base, long offset,        // Unsafe addressing
//             boolean fromSegment,
//             V v, C container, long index,    // Arguments for default implementation
//             StoreVectorOperation<C, V> defaultImpl) {
bool LibraryCallKit::inline_vector_mem_operation(bool is_store) {
  const TypeInstPtr* vector_klass = gvn().type(argument(0))->isa_instptr();
  const TypeInstPtr* elem_klass   = gvn().type(argument(1))->isa_instptr();
  const TypeInt*     vlen         = gvn().type(argument(2))->isa_int();
  const TypeInt*     from_ms      = gvn().type(argument(6))->isa_int();

  if (vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
      elem_klass   == nullptr || elem_klass->const_oop()   == nullptr ||
      vlen         == nullptr || !vlen->is_con() ||
      from_ms      == nullptr || !from_ms->is_con()) {
    log_if_needed("  ** missing constant: vclass=%s etype=%s vlen=%s from_ms=%s",
                    NodeClassNames[argument(0)->Opcode()],
                    NodeClassNames[argument(1)->Opcode()],
                    NodeClassNames[argument(2)->Opcode()],
                    NodeClassNames[argument(6)->Opcode()]);
    return false; // not enough info for intrinsification
  }
  if (!is_klass_initialized(vector_klass)) {
    log_if_needed("  ** klass argument not initialized");
    return false;
  }

  ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
  if (!elem_type->is_primitive_type()) {
    log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
    return false; // should be primitive type
  }
  BasicType elem_bt = elem_type->basic_type();
  int num_elem = vlen->get_con();

  // TODO When mask usage is supported, VecMaskNotUsed needs to be VecMaskUseLoad.
  if (!arch_supports_vector(is_store ? Op_StoreVector : Op_LoadVector, num_elem, elem_bt, VecMaskNotUsed)) {
    log_if_needed("  ** not supported: arity=%d op=%s vlen=%d etype=%s ismask=no",
                    is_store, is_store ? "store" : "load",
                    num_elem, type2name(elem_bt));
    return false; // not supported
  }

  ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
  bool is_mask = is_vector_mask(vbox_klass);

  Node* base = argument(3);
  Node* offset = ConvL2X(argument(4));

  // Save state and restore on bailout
  uint old_sp = sp();
  SafePointNode* old_map = clone_map();

  Node* addr = make_unsafe_address(base, offset, (is_mask ? T_BOOLEAN : elem_bt), true);

  // The memory barrier checks are based on the ones for unsafe access.
  // This is not a 1-1 implementation.
  const Type *const base_type = gvn().type(base);

  const TypePtr *addr_type = gvn().type(addr)->isa_ptr();
  const TypeAryPtr* arr_type = addr_type->isa_aryptr();

  const bool in_native = TypePtr::NULL_PTR == base_type; // base always null
  const bool in_heap   = !TypePtr::NULL_PTR->higher_equal(base_type); // base never null

  const bool is_mixed_access = !in_heap && !in_native;

  const bool is_mismatched_access = in_heap && (addr_type->isa_aryptr() == nullptr);

  const bool needs_cpu_membar = is_mixed_access || is_mismatched_access;

  // For non-masked mismatched memory segment vector read/write accesses, intrinsification
  // can continue with an unknown backing storage type, and the compiler can skip inserting
  // the explicit reinterpretation IR (after loading from, or before storing to, the backing
  // storage) that is mandatory for semantic correctness on big-endian memory layouts.
  bool mismatched_ms = LITTLE_ENDIAN_ONLY(false)
      BIG_ENDIAN_ONLY(from_ms->get_con() && !is_mask && arr_type != nullptr &&
                      arr_type->elem()->array_element_basic_type() != elem_bt);
  BasicType mem_elem_bt = mismatched_ms ? arr_type->elem()->array_element_basic_type() : elem_bt;
  if (!is_java_primitive(mem_elem_bt)) {
    log_if_needed("  ** non-primitive array element type");
    return false;
  }
  int mem_num_elem = mismatched_ms ? (num_elem * type2aelembytes(elem_bt)) / type2aelembytes(mem_elem_bt) : num_elem;
  if (arr_type != nullptr && !is_mask && !elem_consistent_with_arr(elem_bt, arr_type, mismatched_ms)) {
    log_if_needed("  ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s ismask=no",
                    is_store, is_store ? "store" : "load",
                    num_elem, type2name(elem_bt), type2name(arr_type->elem()->array_element_basic_type()));
    set_map(old_map);
    set_sp(old_sp);
    return false;
  }

  // In case of mismatched memory segment accesses, double-check that the memory operations on the memory element type are supported by the backend.
  if (mismatched_ms) {
    if (is_store) {
      if (!arch_supports_vector(Op_StoreVector, num_elem, elem_bt, VecMaskNotUsed)
          || !arch_supports_vector(Op_VectorReinterpret, mem_num_elem, mem_elem_bt, VecMaskNotUsed)) {
        log_if_needed("  ** not supported: arity=%d op=%s vlen=%d*8 etype=%s/8 ismask=no",
                        is_store, "store",
                        num_elem, type2name(elem_bt));
        set_map(old_map);
        set_sp(old_sp);
        return false; // not supported
      }
    } else {
      if (!arch_supports_vector(Op_LoadVector, mem_num_elem, mem_elem_bt, VecMaskNotUsed)
          || !arch_supports_vector(Op_VectorReinterpret, num_elem, elem_bt, VecMaskNotUsed)) {
        log_if_needed("  ** not supported: arity=%d op=%s vlen=%d*8 etype=%s/8 ismask=no",
                        is_store, "load",
                        mem_num_elem, type2name(mem_elem_bt));
        set_map(old_map);
        set_sp(old_sp);
        return false; // not supported
      }
    }
  }
  if (is_mask) {
    if (!is_store) {
      if (!arch_supports_vector(Op_LoadVector, num_elem, elem_bt, VecMaskUseLoad)) {
        set_map(old_map);
        set_sp(old_sp);
        return false; // not supported
      }
    } else {
      if (!arch_supports_vector(Op_StoreVector, num_elem, elem_bt, VecMaskUseStore)) {
        set_map(old_map);
        set_sp(old_sp);
        return false; // not supported
      }
    }
  }

  const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);

  if (needs_cpu_membar) {
    insert_mem_bar(Op_MemBarCPUOrder);
  }

  if (is_store) {
    Node* val = unbox_vector(argument(7), vbox_type, elem_bt, num_elem);
    if (val == nullptr) {
      set_map(old_map);
      set_sp(old_sp);
      return false; // operand unboxing failed
    }
    set_all_memory(reset_memory());

    // In case the store needs to happen to a byte array, reinterpret the incoming vector to a byte vector.
    int store_num_elem = num_elem;
    if (mismatched_ms) {
      store_num_elem = mem_num_elem;
      const TypeVect* to_vect_type = TypeVect::make(mem_elem_bt, store_num_elem);
      val = gvn().transform(new VectorReinterpretNode(val, val->bottom_type()->is_vect(), to_vect_type));
    }
    if (is_mask) {
      val = gvn().transform(VectorStoreMaskNode::make(gvn(), val, elem_bt, num_elem));
    }
    Node* vstore = gvn().transform(StoreVectorNode::make(0, control(), memory(addr), addr, addr_type, val, store_num_elem));
    set_memory(vstore, addr_type);
  } else {
    // When using a byte array, load as bytes and then reinterpret the value. Otherwise, do a simple vector load.
    Node* vload = nullptr;
    if (mismatched_ms) {
      vload = gvn().transform(LoadVectorNode::make(0, control(), memory(addr), addr, addr_type, mem_num_elem, mem_elem_bt));
      const TypeVect* to_vect_type = TypeVect::make(elem_bt, num_elem);
      vload = gvn().transform(new VectorReinterpretNode(vload, vload->bottom_type()->is_vect(), to_vect_type));
    } else {
      // Special handling for masks
      if (is_mask) {
        vload = gvn().transform(LoadVectorNode::make(0, control(), memory(addr), addr, addr_type, num_elem, T_BOOLEAN));
        vload = gvn().transform(new VectorLoadMaskNode(vload, TypeVect::makemask(elem_bt, num_elem)));
      } else {
        vload = gvn().transform(LoadVectorNode::make(0, control(), memory(addr), addr, addr_type, num_elem, elem_bt));
      }
    }
    Node* box = box_vector(vload, vbox_type, elem_bt, num_elem);
    set_result(box);
  }

  destruct_map_clone(old_map);

  if (needs_cpu_membar) {
    insert_mem_bar(Op_MemBarCPUOrder);
  }

  C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
  return true;
}

 964 
 965 //  public static
 966 //  <C,
 967 //   V extends Vector<?>,
 968 //   E,
 969 //   S extends VectorSpecies<E>,
 970 //   M extends VectorMask<E>>
 971 //  V loadMasked(Class<? extends V> vClass, Class<M> mClass, Class<E> eClass,
 972 //               int length, Object base, long offset,          // Unsafe addressing
 973 //               boolean fromSegment,
 974 //               M m, int offsetInRange,
 975 //               C container, long index, S s,                  // Arguments for default implementation
 976 //               LoadVectorMaskedOperation<C, V, S, M> defaultImpl) {
 977 //  public static
 978 //  <C,
 979 //   V extends Vector<E>,
 980 //   M extends VectorMask<E>,
 981 //   E>
 982 //  void storeMasked(Class<? extends V> vClass, Class<M> mClass, Class<E> eClass,
 983 //                   int length,
 984 //                   Object base, long offset,                  // Unsafe addressing
 985 //                   boolean fromSegment,
 986 //                   V v, M m, C container, long index,         // Arguments for default implementation
 987 //                   StoreVectorMaskedOperation<C, V, M> defaultImpl) {
 988 
 989 bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) {
 990   const TypeInstPtr* vector_klass = gvn().type(argument(0))->isa_instptr();
 991   const TypeInstPtr* mask_klass   = gvn().type(argument(1))->isa_instptr();
 992   const TypeInstPtr* elem_klass   = gvn().type(argument(2))->isa_instptr();
 993   const TypeInt*     vlen         = gvn().type(argument(3))->isa_int();
 994   const TypeInt*     from_ms      = gvn().type(argument(7))->isa_int();
 995 
 996   if (vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
 997       mask_klass   == nullptr || mask_klass->const_oop()   == nullptr ||
 998       elem_klass   == nullptr || elem_klass->const_oop()   == nullptr ||
 999       vlen         == nullptr || !vlen->is_con() ||
1000       from_ms      == nullptr || !from_ms->is_con()) {
1001     log_if_needed("  ** missing constant: vclass=%s mclass=%s etype=%s vlen=%s from_ms=%s",
1002                     NodeClassNames[argument(0)->Opcode()],
1003                     NodeClassNames[argument(1)->Opcode()],
1004                     NodeClassNames[argument(2)->Opcode()],
1005                     NodeClassNames[argument(3)->Opcode()],
1006                     NodeClassNames[argument(7)->Opcode()]);
1007     return false; // not enough info for intrinsification
1008   }
1009   if (!is_klass_initialized(vector_klass)) {
1010     log_if_needed("  ** klass argument not initialized");
1011     return false;
1012   }
1013 
1014   if (!is_klass_initialized(mask_klass)) {
1015     log_if_needed("  ** mask klass argument not initialized");
1016     return false;
1017   }
1018 
1019   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
1020   if (!elem_type->is_primitive_type()) {
1021     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
1022     return false; // should be primitive type
1023   }
1024 
1025   BasicType elem_bt = elem_type->basic_type();
1026   int num_elem = vlen->get_con();
1027 
1028   Node* base = argument(4);
1029   Node* offset = ConvL2X(argument(5));
1030 
1031   // Save state and restore on bailout
1032   uint old_sp = sp();
1033   SafePointNode* old_map = clone_map();
1034 
1035   Node* addr = make_unsafe_address(base, offset, elem_bt, true);
1036   const TypePtr *addr_type = gvn().type(addr)->isa_ptr();
1037   const TypeAryPtr* arr_type = addr_type->isa_aryptr();
1038 
1039   bool mismatched_ms = from_ms->get_con() && arr_type != nullptr && arr_type->elem()->array_element_basic_type() != elem_bt;
1040   BIG_ENDIAN_ONLY(if (mismatched_ms) return false;)
1041   // If there is no consistency between array and vector element types, it must be special byte array case
1042   if (arr_type != nullptr && !elem_consistent_with_arr(elem_bt, arr_type, mismatched_ms)) {
1043     log_if_needed("  ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s",
1044                     is_store, is_store ? "storeMasked" : "loadMasked",
1045                     num_elem, type2name(elem_bt), type2name(arr_type->elem()->array_element_basic_type()));
1046     set_map(old_map);
1047     set_sp(old_sp);
1048     return false;
1049   }
1050 
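  // For a mismatched memory segment access, the operation is performed on the byte-sized
  // view of the vector, so scale the element count and use T_BYTE as the memory element type.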
  int mem_num_elem = mismatched_ms ? num_elem * type2aelembytes(elem_bt) : num_elem;
  BasicType mem_elem_bt = mismatched_ms ? T_BYTE : elem_bt;
  bool supports_predicate = arch_supports_vector(is_store ? Op_StoreVectorMasked : Op_LoadVectorMasked,
                                                mem_num_elem, mem_elem_bt, VecMaskUseLoad);

  // If the current arch does not support predicated operations, we have to bail
  // out when the current case relies on the predicate feature.
  if (!supports_predicate) {
    bool needs_predicate = false;
    if (is_store) {
      // Masked vector store always uses the predicated store.
      needs_predicate = true;
    } else {
      // Masked vector load with IOOBE always uses the predicated load.
      const TypeInt* offset_in_range = gvn().type(argument(9))->isa_int();
      if (!offset_in_range->is_con()) {
        log_if_needed("  ** missing constant: offsetInRange=%s",
                        NodeClassNames[argument(9)->Opcode()]);
1069         set_map(old_map);
1070         set_sp(old_sp);
1071         return false;
1072       }
1073       needs_predicate = (offset_in_range->get_con() == 0);
1074     }
1075 
1076     if (needs_predicate) {
1077       log_if_needed("  ** not supported: op=%s vlen=%d etype=%s mismatched_ms=%d",
1078                       is_store ? "storeMasked" : "loadMasked",
1079                       num_elem, type2name(elem_bt), mismatched_ms ? 1 : 0);
1080       set_map(old_map);
1081       set_sp(old_sp);
1082       return false;
1083     }
1084   }
1085 
1086   // This only happens for masked vector load. If predicate is not supported, then check whether
1087   // the normal vector load and blend operations are supported by backend.
1088   if (!supports_predicate && (!arch_supports_vector(Op_LoadVector, mem_num_elem, mem_elem_bt, VecMaskNotUsed) ||
1089       !arch_supports_vector(Op_VectorBlend, mem_num_elem, mem_elem_bt, VecMaskUseLoad))) {
1090     log_if_needed("  ** not supported: op=loadMasked vlen=%d etype=%s mismatched_ms=%d",
1091                     num_elem, type2name(elem_bt), mismatched_ms ? 1 : 0);
1092     set_map(old_map);
1093     set_sp(old_sp);
1094     return false;
1095   }
1096 
1097   // Since we are using byte array, we need to double check that the vector reinterpret operation
1098   // with byte type is supported by backend.
1099   if (mismatched_ms) {
1100     if (!arch_supports_vector(Op_VectorReinterpret, mem_num_elem, T_BYTE, VecMaskNotUsed)) {
1101       log_if_needed("  ** not supported: arity=%d op=%s vlen=%d etype=%s mismatched_ms=1",
1102                       is_store, is_store ? "storeMasked" : "loadMasked",
1103                       num_elem, type2name(elem_bt));
1104       set_map(old_map);
1105       set_sp(old_sp);
1106       return false;
1107     }
1108   }
1109 
1110   // Since it needs to unbox the mask, we need to double check that the related load operations
1111   // for mask are supported by backend.
1112   if (!arch_supports_vector(Op_LoadVector, num_elem, elem_bt, VecMaskUseLoad)) {
1113     log_if_needed("  ** not supported: arity=%d op=%s vlen=%d etype=%s",
1114                       is_store, is_store ? "storeMasked" : "loadMasked",
1115                       num_elem, type2name(elem_bt));
1116     set_map(old_map);
1117     set_sp(old_sp);
1118     return false;
1119   }
1120 
1121   // Can base be null? Otherwise, always on-heap access.
1122   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(gvn().type(base));
1123   if (can_access_non_heap) {
1124     insert_mem_bar(Op_MemBarCPUOrder);
1125   }
1126 
1127   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
1128   ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
1129   assert(!is_vector_mask(vbox_klass) && is_vector_mask(mbox_klass), "Invalid class type");
1130   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
1131   const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
1132 
1133   Node* mask = unbox_vector(is_store ? argument(9) : argument(8), mbox_type, elem_bt, num_elem);
1134   if (mask == nullptr) {
1135     log_if_needed("  ** unbox failed mask=%s",
1136                     is_store ? NodeClassNames[argument(9)->Opcode()]
1137                              : NodeClassNames[argument(8)->Opcode()]);
1138     set_map(old_map);
1139     set_sp(old_sp);
1140     return false;
1141   }
1142 
1143   if (is_store) {
1144     Node* val = unbox_vector(argument(8), vbox_type, elem_bt, num_elem);
1145     if (val == nullptr) {
1146       log_if_needed("  ** unbox failed vector=%s",
1147                       NodeClassNames[argument(8)->Opcode()]);
1148       set_map(old_map);
1149       set_sp(old_sp);
1150       return false; // operand unboxing failed
1151     }
1152     set_all_memory(reset_memory());
1153 
1154     if (mismatched_ms) {
1155       // Reinterpret the incoming vector to byte vector.
1156       const TypeVect* to_vect_type = TypeVect::make(mem_elem_bt, mem_num_elem);
1157       val = gvn().transform(new VectorReinterpretNode(val, val->bottom_type()->is_vect(), to_vect_type));
1158       // Reinterpret the vector mask to byte type.
1159       const TypeVect* from_mask_type = TypeVect::makemask(elem_bt, num_elem);
1160       const TypeVect* to_mask_type = TypeVect::makemask(mem_elem_bt, mem_num_elem);
1161       mask = gvn().transform(new VectorReinterpretNode(mask, from_mask_type, to_mask_type));
1162     }
1163     Node* vstore = gvn().transform(new StoreVectorMaskedNode(control(), memory(addr), addr, val, addr_type, mask));
1164     set_memory(vstore, addr_type);
1165   } else {
1166     Node* vload = nullptr;
1167 
1168     if (mismatched_ms) {
1169       // Reinterpret the vector mask to byte type.
1170       const TypeVect* from_mask_type = TypeVect::makemask(elem_bt, num_elem);
1171       const TypeVect* to_mask_type = TypeVect::makemask(mem_elem_bt, mem_num_elem);
1172       mask = gvn().transform(new VectorReinterpretNode(mask, from_mask_type, to_mask_type));
1173     }
1174 
1175     if (supports_predicate) {
1176       // Generate masked load vector node if predicate feature is supported.
1177       const TypeVect* vt = TypeVect::make(mem_elem_bt, mem_num_elem);
1178       vload = gvn().transform(new LoadVectorMaskedNode(control(), memory(addr), addr, addr_type, vt, mask));
1179     } else {
1180       // Use the vector blend to implement the masked load vector. The biased elements are zeros.
1181       Node* zero = gvn().transform(gvn().zerocon(mem_elem_bt));
1182       zero = gvn().transform(VectorNode::scalar2vector(zero, mem_num_elem, mem_elem_bt));
1183       vload = gvn().transform(LoadVectorNode::make(0, control(), memory(addr), addr, addr_type, mem_num_elem, mem_elem_bt));
1184       vload = gvn().transform(new VectorBlendNode(zero, vload, mask));
1185     }
1186 
1187     if (mismatched_ms) {
1188       const TypeVect* to_vect_type = TypeVect::make(elem_bt, num_elem);
1189       vload = gvn().transform(new VectorReinterpretNode(vload, vload->bottom_type()->is_vect(), to_vect_type));
1190     }
1191 
1192     Node* box = box_vector(vload, vbox_type, elem_bt, num_elem);
1193     set_result(box);
1194   }
1195 
1196   destruct_map_clone(old_map);
1197 
1198   if (can_access_non_heap) {
1199     insert_mem_bar(Op_MemBarCPUOrder);
1200   }
1201 
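       // Record the widest vector size (in bytes) generated so far in this compilation.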
1202   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
1203   return true;
1204 }
1205 
1206 // <C,
1207 //  V extends Vector<?>,
1208 //  W extends Vector<Integer>,
1209 //  S extends VectorSpecies<E>,
1210 //  M extends VectorMask<E>,
1211 //  E>
1212 // V loadWithMap(Class<? extends V> vectorClass, Class<M> maskClass, Class<E> elementType, int length,
1213 //               Class<? extends Vector<Integer>> vectorIndexClass,
1214 //               Object base, long offset, // Unsafe addressing
1215 //               W index_vector, M m,
1216 //               C container, int index, int[] indexMap, int indexM, S s, // Arguments for default implementation
1217 //               LoadVectorOperationWithMap<C, V, E, S, M> defaultImpl)
1218 //
1219 //  <C,
1220 //   V extends Vector<E>,
1221 //   W extends Vector<Integer>,
1222 //   M extends VectorMask<E>,
1223 //   E>
1224 //  void storeWithMap(Class<? extends V> vectorClass, Class<M> maskClass, Class<E> elementType,
1225 //                    int length, Class<? extends Vector<Integer>> vectorIndexClass, Object base, long offset,    // Unsafe addressing
1226 //                    W index_vector, V v, M m,
1227 //                    C container, int index, int[] indexMap, int indexM, // Arguments for default implementation
1228 //                    StoreVectorOperationWithMap<C, V, M, E> defaultImpl)
1229 //
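     // Illustrative Java-level usage (not from this file; jdk.incubator.vector API) that is expected to
     // funnel through VectorSupport.loadWithMap / storeWithMap and thus may reach this intrinsic:
     //
     //   int[] a = ...; int[] indexMap = ...;
     //   IntVector v = IntVector.fromArray(IntVector.SPECIES_256, a, 0, indexMap, 0);  // gather
     //   v.intoArray(a, 0, indexMap, 0);                                               // scatter
     //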
1230 bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) {
1231   const TypeInstPtr* vector_klass     = gvn().type(argument(0))->isa_instptr();
1232   const TypeInstPtr* mask_klass       = gvn().type(argument(1))->isa_instptr();
1233   const TypeInstPtr* elem_klass       = gvn().type(argument(2))->isa_instptr();
1234   const TypeInt*     vlen             = gvn().type(argument(3))->isa_int();
1235   const TypeInstPtr* vector_idx_klass = gvn().type(argument(4))->isa_instptr();
1236 
1237   if (vector_klass     == nullptr || vector_klass->const_oop()     == nullptr ||
1238 //      mask_klass       == nullptr || mask_klass->const_oop()       == nullptr ||
1239       elem_klass       == nullptr || elem_klass->const_oop()       == nullptr ||
1240       vlen             == nullptr || !vlen->is_con() ||
1241       vector_idx_klass == nullptr || vector_idx_klass->const_oop() == nullptr) {
1242     log_if_needed("  ** missing constant: vclass=%s etype=%s vlen=%s viclass=%s",
1243                     NodeClassNames[argument(0)->Opcode()],
1244                     NodeClassNames[argument(2)->Opcode()],
1245                     NodeClassNames[argument(3)->Opcode()],
1246                     NodeClassNames[argument(4)->Opcode()]);
1247     return false; // not enough info for intrinsification
1248   }
1249 
1250   if (!is_klass_initialized(vector_klass) || !is_klass_initialized(vector_idx_klass)) {
1251     log_if_needed("  ** klass argument not initialized");
1252     return false;
1253   }
1254 
1255   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
1256   if (!elem_type->is_primitive_type()) {
1257     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
1258     return false; // should be primitive type
1259   }
1260 
1261   BasicType elem_bt = elem_type->basic_type();
1262   int num_elem = vlen->get_con();
1263 
1264   const Type* vmask_type = gvn().type(is_scatter ? argument(10) : argument(9));
1265   bool is_masked_op = vmask_type != TypePtr::NULL_PTR;
1266   if (is_masked_op) {
1267     if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) {
1268       log_if_needed("  ** missing constant: maskclass=%s", NodeClassNames[argument(1)->Opcode()]);
1269       return false; // not enough info for intrinsification
1270     }
1271 
1272     if (!is_klass_initialized(mask_klass)) {
1273       log_if_needed("  ** mask klass argument not initialized");
1274       return false;
1275     }
1276 
1277     if (vmask_type->maybe_null()) {
1278       log_if_needed("  ** null mask values are not allowed for masked op");
1279       return false;
1280     }
1281 
1282     // Check whether the predicated gather/scatter node is supported by architecture.
1283     VectorMaskUseType mask = (VectorMaskUseType) (VecMaskUseLoad | VecMaskUsePred);
1284     if (!arch_supports_vector(is_scatter ? Op_StoreVectorScatterMasked : Op_LoadVectorGatherMasked, num_elem, elem_bt, mask)) {
1285       log_if_needed("  ** not supported: arity=%d op=%s vlen=%d etype=%s is_masked_op=1",
1286                       is_scatter, is_scatter ? "scatterMasked" : "gatherMasked",
1287                       num_elem, type2name(elem_bt));
1288       return false; // not supported
1289     }
1290   } else {
1291     // Check whether the normal gather/scatter node is supported for non-masked operation.
1292     if (!arch_supports_vector(is_scatter ? Op_StoreVectorScatter : Op_LoadVectorGather, num_elem, elem_bt, VecMaskNotUsed)) {
1293       log_if_needed("  ** not supported: arity=%d op=%s vlen=%d etype=%s is_masked_op=0",
1294                       is_scatter, is_scatter ? "scatter" : "gather",
1295                       num_elem, type2name(elem_bt));
1296       return false; // not supported
1297     }
1298   }
1299 
1300   // Check that the vector holding the indices is supported by the architecture.
1301   // For sub-word gathers the expander receives the index array instead of an index vector.
1302   if (!is_subword_type(elem_bt) && !arch_supports_vector(Op_LoadVector, num_elem, T_INT, VecMaskNotUsed)) {
1303       log_if_needed("  ** not supported: arity=%d op=%s/loadindex vlen=%d etype=int is_masked_op=%d",
1304                       is_scatter, is_scatter ? "scatter" : "gather",
1305                       num_elem, is_masked_op ? 1 : 0);
1306       return false; // not supported
1307   }
1308 
1309   Node* base = argument(5);
1310   Node* offset = ConvL2X(argument(6));
1311 
1312   // Save state and restore on bailout
1313   uint old_sp = sp();
1314   SafePointNode* old_map = clone_map();
1315 
1316   Node* addr = make_unsafe_address(base, offset, elem_bt, true);
1317 
1318   const TypePtr* addr_type = gvn().type(addr)->isa_ptr();
1319   const TypeAryPtr* arr_type = addr_type->isa_aryptr();
1320 
1321   // The array must be consistent with vector type
1322   if (arr_type == nullptr || !elem_consistent_with_arr(elem_bt, arr_type, false)) {
1323     log_if_needed("  ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s ismask=no",
1324                     is_scatter, is_scatter ? "scatter" : "gather",
1325                     num_elem, type2name(elem_bt), arr_type == nullptr ? "null" : type2name(arr_type->elem()->array_element_basic_type()));
1326     set_map(old_map);
1327     set_sp(old_sp);
1328     return false;
1329   }
1330 
1331   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
1332   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
1333   ciKlass* vbox_idx_klass = vector_idx_klass->const_oop()->as_instance()->java_lang_Class_klass();
1334   if (vbox_idx_klass == nullptr) {
1335     set_map(old_map);
1336     set_sp(old_sp);
1337     return false;
1338   }
1339 
1340   Node* index_vect = nullptr;
1341   const TypeInstPtr* vbox_idx_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_idx_klass);
1342   if (!is_subword_type(elem_bt)) {
1343     index_vect = unbox_vector(argument(8), vbox_idx_type, T_INT, num_elem);
1344     if (index_vect == nullptr) {
1345       set_map(old_map);
1346       set_sp(old_sp);
1347       return false;
1348     }
1349   }
1350 
1351   Node* mask = nullptr;
1352   if (is_masked_op) {
1353     ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
1354     const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
1355     mask = unbox_vector(is_scatter ? argument(10) : argument(9), mbox_type, elem_bt, num_elem);
1356     if (mask == nullptr) {
1357       log_if_needed("  ** unbox failed mask=%s",
1358                     is_scatter ? NodeClassNames[argument(10)->Opcode()]
1359                                : NodeClassNames[argument(9)->Opcode()]);
1360       set_map(old_map);
1361       set_sp(old_sp);
1362       return false;
1363     }
1364   }
1365 
1366   const TypeVect* vector_type = TypeVect::make(elem_bt, num_elem);
1367   if (is_scatter) {
1368     Node* val = unbox_vector(argument(9), vbox_type, elem_bt, num_elem);
1369     if (val == nullptr) {
1370       set_map(old_map);
1371       set_sp(old_sp);
1372       return false; // operand unboxing failed
1373     }
1374     set_all_memory(reset_memory());
1375 
1376     Node* vstore = nullptr;
1377     if (mask != nullptr) {
1378       vstore = gvn().transform(new StoreVectorScatterMaskedNode(control(), memory(addr), addr, addr_type, val, index_vect, mask));
1379     } else {
1380       vstore = gvn().transform(new StoreVectorScatterNode(control(), memory(addr), addr, addr_type, val, index_vect));
1381     }
1382     set_memory(vstore, addr_type);
1383   } else {
1384     Node* vload = nullptr;
1385     Node* index    = argument(11);
1386     Node* indexMap = argument(12);
1387     Node* indexM   = argument(13);
1388     if (mask != nullptr) {
1389       if (is_subword_type(elem_bt)) {
1390         Node* index_arr_base = array_element_address(indexMap, indexM, T_INT);
1391         vload = gvn().transform(new LoadVectorGatherMaskedNode(control(), memory(addr), addr, addr_type, vector_type, index_arr_base, mask, index));
1392       } else {
1393         vload = gvn().transform(new LoadVectorGatherMaskedNode(control(), memory(addr), addr, addr_type, vector_type, index_vect, mask));
1394       }
1395     } else {
1396       if (is_subword_type(elem_bt)) {
1397         Node* index_arr_base = array_element_address(indexMap, indexM, T_INT);
1398         vload = gvn().transform(new LoadVectorGatherNode(control(), memory(addr), addr, addr_type, vector_type, index_arr_base, index));
1399       } else {
1400         vload = gvn().transform(new LoadVectorGatherNode(control(), memory(addr), addr, addr_type, vector_type, index_vect));
1401       }
1402     }
1403     Node* box = box_vector(vload, vbox_type, elem_bt, num_elem);
1404     set_result(box);
1405   }
1406 
1407   destruct_map_clone(old_map);
1408 
1409   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
1410   return true;
1411 }
1412 
1413 // public static
1414 // <V extends Vector<E>,
1415 //  M extends VectorMask<E>,
1416 //  E>
1417 // long reductionCoerced(int oprId, Class<? extends V> vectorClass, Class<? extends M> maskClass,
1418 //                       Class<E> elementType, int length, V v, M m,
1419 //                       ReductionOperation<V, M> defaultImpl)
1420 bool LibraryCallKit::inline_vector_reduction() {
1421   const TypeInt*     opr          = gvn().type(argument(0))->isa_int();
1422   const TypeInstPtr* vector_klass = gvn().type(argument(1))->isa_instptr();
1423   const TypeInstPtr* mask_klass   = gvn().type(argument(2))->isa_instptr();
1424   const TypeInstPtr* elem_klass   = gvn().type(argument(3))->isa_instptr();
1425   const TypeInt*     vlen         = gvn().type(argument(4))->isa_int();
1426 
1427   if (opr          == nullptr || !opr->is_con() ||
1428       vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
1429 //      mask_klass   == nullptr || mask_klass->const_oop()   == nullptr ||
1430       elem_klass   == nullptr || elem_klass->const_oop()   == nullptr ||
1431       vlen         == nullptr || !vlen->is_con()) {
1432     log_if_needed("  ** missing constant: opr=%s vclass=%s etype=%s vlen=%s",
1433                     NodeClassNames[argument(0)->Opcode()],
1434                     NodeClassNames[argument(1)->Opcode()],
1435                     NodeClassNames[argument(3)->Opcode()],
1436                     NodeClassNames[argument(4)->Opcode()]);
1437     return false; // not enough info for intrinsification
1438   }
1439   if (!is_klass_initialized(vector_klass)) {
1440     log_if_needed("  ** klass argument not initialized");
1441     return false;
1442   }
1443   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
1444   if (!elem_type->is_primitive_type()) {
1445     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
1446     return false; // should be primitive type
1447   }
1448 
1449   const Type* vmask_type = gvn().type(argument(6));
1450   bool is_masked_op = vmask_type != TypePtr::NULL_PTR;
1451   if (is_masked_op) {
1452     if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) {
1453       log_if_needed("  ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]);
1454       return false; // not enough info for intrinsification
1455     }
1456 
1457     if (!is_klass_initialized(mask_klass)) {
1458       log_if_needed("  ** mask klass argument not initialized");
1459       return false;
1460     }
1461 
1462     if (vmask_type->maybe_null()) {
1463       log_if_needed("  ** null mask values are not allowed for masked op");
1464       return false;
1465     }
1466   }
1467 
1468   BasicType elem_bt = elem_type->basic_type();
1469   int num_elem = vlen->get_con();
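       // Map the Vector API operation id to the ideal scalar opcode and then to the matching reduction
       // opcode; if no distinct reduction form exists, ReductionNode::opcode() returns the scalar opcode
       // itself and the intrinsic bails out below.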
1470   int opc  = VectorSupport::vop2ideal(opr->get_con(), elem_bt);
1471   int sopc = ReductionNode::opcode(opc, elem_bt);
1472 
1473   // Ensure that a reduction form of the lanewise operation exists and is supported.
1474   // When using a mask, the mask use type needs to be VecMaskUseLoad.
1475   if (sopc == opc || !arch_supports_vector(sopc, num_elem, elem_bt, is_masked_op ? VecMaskUseLoad : VecMaskNotUsed)) {
1476     log_if_needed("  ** not supported: arity=1 op=%d/reduce vlen=%d etype=%s is_masked_op=%d",
1477                     sopc, num_elem, type2name(elem_bt), is_masked_op ? 1 : 0);
1478     return false;
1479   }
1480 
1481   // True if the current platform implements the masked reduction with the predicate feature.
1482   bool use_predicate = is_masked_op && arch_supports_vector(sopc, num_elem, elem_bt, VecMaskUsePred);
1483   if (is_masked_op && !use_predicate && !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad)) {
1484     log_if_needed("  ** not supported: arity=1 op=%d/reduce vlen=%d etype=%s is_masked_op=1",
1485                     sopc, num_elem, type2name(elem_bt));
1486     return false;
1487   }
1488 
1489   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
1490   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
1491 
1492   Node* opd = unbox_vector(argument(5), vbox_type, elem_bt, num_elem);
1493   if (opd == nullptr) {
1494     return false; // operand unboxing failed
1495   }
1496 
1497   Node* mask = nullptr;
1498   if (is_masked_op) {
1499     ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
1500     assert(is_vector_mask(mbox_klass), "argument(2) should be a mask class");
1501     const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
1502     mask = unbox_vector(argument(6), mbox_type, elem_bt, num_elem);
1503     if (mask == nullptr) {
1504       log_if_needed("  ** unbox failed mask=%s",
1505                       NodeClassNames[argument(6)->Opcode()]);
1506       return false;
1507     }
1508   }
1509 
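       // The scalar identity value of the reduction (e.g. 0 for add, 1 for mul). For a masked operation
       // without predicate support, inactive lanes are blended with the identity so they do not affect
       // the result.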
1510   Node* init = ReductionNode::make_identity_con_scalar(gvn(), opc, elem_bt);
1511   Node* value = opd;
1512 
1513   assert(mask != nullptr || !is_masked_op, "Masked op needs the mask value never null");
1514   if (mask != nullptr && !use_predicate) {
1515     Node* reduce_identity = gvn().transform(VectorNode::scalar2vector(init, num_elem, elem_bt));
1516     value = gvn().transform(new VectorBlendNode(reduce_identity, value, mask));
1517   }
1518 
1519   // Make an unordered Reduction node. This affects only AddReductionVF/VD and MulReductionVF/VD,
1520   // as these operations are allowed to be associative (not requiring strict order) in VectorAPI.
1521   value = ReductionNode::make(opc, nullptr, init, value, elem_bt, /* requires_strict_order */ false);
1522 
1523   if (mask != nullptr && use_predicate) {
1524     value->add_req(mask);
1525     value->add_flag(Node::Flag_is_predicated_vector);
1526   }
1527 
1528   value = gvn().transform(value);
1529 
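       // The reduction result is returned to Java as raw long bits: sub-long integral results are widened
       // with ConvI2L, and floating-point results are first moved to their integer bit representation.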
1530   Node* bits = nullptr;
1531   switch (elem_bt) {
1532     case T_BYTE:
1533     case T_SHORT:
1534     case T_INT: {
1535       bits = gvn().transform(new ConvI2LNode(value));
1536       break;
1537     }
1538     case T_FLOAT: {
1539       value = gvn().transform(new MoveF2INode(value));
1540       bits  = gvn().transform(new ConvI2LNode(value));
1541       break;
1542     }
1543     case T_DOUBLE: {
1544       bits = gvn().transform(new MoveD2LNode(value));
1545       break;
1546     }
1547     case T_LONG: {
1548       bits = value; // no conversion needed
1549       break;
1550     }
1551     default: fatal("%s", type2name(elem_bt));
1552   }
1553   set_result(bits);
1554   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
1555   return true;
1556 }
1557 
1558 // public static <V> boolean test(int cond, Class<?> vectorClass, Class<?> elementType, int vlen,
1559 //                                V v1, V v2,
1560 //                                BiFunction<V, V, Boolean> defaultImpl)
1561 //
1562 bool LibraryCallKit::inline_vector_test() {
1563   const TypeInt*     cond         = gvn().type(argument(0))->isa_int();
1564   const TypeInstPtr* vector_klass = gvn().type(argument(1))->isa_instptr();
1565   const TypeInstPtr* elem_klass   = gvn().type(argument(2))->isa_instptr();
1566   const TypeInt*     vlen         = gvn().type(argument(3))->isa_int();
1567 
1568   if (cond         == nullptr || !cond->is_con() ||
1569       vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
1570       elem_klass   == nullptr || elem_klass->const_oop()   == nullptr ||
1571       vlen         == nullptr || !vlen->is_con()) {
1572     log_if_needed("  ** missing constant: cond=%s vclass=%s etype=%s vlen=%s",
1573                     NodeClassNames[argument(0)->Opcode()],
1574                     NodeClassNames[argument(1)->Opcode()],
1575                     NodeClassNames[argument(2)->Opcode()],
1576                     NodeClassNames[argument(3)->Opcode()]);
1577     return false; // not enough info for intrinsification
1578   }
1579   if (!is_klass_initialized(vector_klass)) {
1580     log_if_needed("  ** klass argument not initialized");
1581     return false;
1582   }
1583   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
1584   if (!elem_type->is_primitive_type()) {
1585     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
1586     return false; // should be primitive type
1587   }
1588   BasicType elem_bt = elem_type->basic_type();
1589   int num_elem = vlen->get_con();
1590   BoolTest::mask booltest = (BoolTest::mask)cond->get_con();
1591   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
1592   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
1593 
1594   if (!arch_supports_vector(Op_VectorTest, num_elem, elem_bt, is_vector_mask(vbox_klass) ? VecMaskUseLoad : VecMaskNotUsed)) {
1595     log_if_needed("  ** not supported: arity=2 op=test/%d vlen=%d etype=%s ismask=%d",
1596                     cond->get_con(), num_elem, type2name(elem_bt),
1597                     is_vector_mask(vbox_klass));
1598     return false;
1599   }
1600 
1601   Node* opd1 = unbox_vector(argument(4), vbox_type, elem_bt, num_elem);
       if (opd1 == nullptr) {
         return false; // operand unboxing failed
       }
1602   Node* opd2;
1603   if (Matcher::vectortest_needs_second_argument(booltest == BoolTest::overflow,
1604                                                 opd1->bottom_type()->isa_vectmask())) {
1605     opd2 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem);
1606   } else {
1607     opd2 = opd1;
1608   }
1609   if (opd2 == nullptr) {
1610     return false; // operand unboxing failed
1611   }
1612 
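       // Emit the vector test and materialize its outcome as an int 0/1 so the caller observes a boolean.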
1613   Node* cmp = gvn().transform(new VectorTestNode(opd1, opd2, booltest));
1614   BoolTest::mask test = Matcher::vectortest_mask(booltest == BoolTest::overflow,
1615                                                  opd1->bottom_type()->isa_vectmask(), num_elem);
1616   Node* bol = gvn().transform(new BoolNode(cmp, test));
1617   Node* res = gvn().transform(new CMoveINode(bol, gvn().intcon(0), gvn().intcon(1), TypeInt::BOOL));
1618 
1619   set_result(res);
1620   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
1621   return true;
1622 }
1623 
1624 // public static
1625 // <V extends Vector<E>,
1626 //  M extends VectorMask<E>,
1627 //  E>
1628 // V blend(Class<? extends V> vectorClass, Class<M> maskClass, Class<E> elementType, int vlen,
1629 //         V v1, V v2, M m,
1630 //         VectorBlendOp<V, M, E> defaultImpl)
1631 bool LibraryCallKit::inline_vector_blend() {
1632   const TypeInstPtr* vector_klass = gvn().type(argument(0))->isa_instptr();
1633   const TypeInstPtr* mask_klass   = gvn().type(argument(1))->isa_instptr();
1634   const TypeInstPtr* elem_klass   = gvn().type(argument(2))->isa_instptr();
1635   const TypeInt*     vlen         = gvn().type(argument(3))->isa_int();
1636 
1637   if (mask_klass == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr) {
1638     return false; // dead code
1639   }
1640   if (mask_klass->const_oop() == nullptr || vector_klass->const_oop() == nullptr ||
1641       elem_klass->const_oop() == nullptr || !vlen->is_con()) {
1642     log_if_needed("  ** missing constant: vclass=%s mclass=%s etype=%s vlen=%s",
1643                     NodeClassNames[argument(0)->Opcode()],
1644                     NodeClassNames[argument(1)->Opcode()],
1645                     NodeClassNames[argument(2)->Opcode()],
1646                     NodeClassNames[argument(3)->Opcode()]);
1647     return false; // not enough info for intrinsification
1648   }
1649   if (!is_klass_initialized(vector_klass) || !is_klass_initialized(mask_klass)) {
1650     log_if_needed("  ** klass argument not initialized");
1651     return false;
1652   }
1653   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
1654   if (!elem_type->is_primitive_type()) {
1655     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
1656     return false; // should be primitive type
1657   }
1658   BasicType elem_bt = elem_type->basic_type();
1659   BasicType mask_bt = elem_bt;
1660   int num_elem = vlen->get_con();
1661 
1662   if (!arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad)) {
1663     log_if_needed("  ** not supported: arity=2 op=blend vlen=%d etype=%s ismask=useload",
1664                     num_elem, type2name(elem_bt));
1665     return false; // not supported
1666   }
1667   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
1668   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
1669 
1670   ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
1671   const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
1672 
1673   Node* v1   = unbox_vector(argument(4), vbox_type, elem_bt, num_elem);
1674   Node* v2   = unbox_vector(argument(5), vbox_type, elem_bt, num_elem);
1675   Node* mask = unbox_vector(argument(6), mbox_type, mask_bt, num_elem);
1676 
1677   if (v1 == nullptr || v2 == nullptr || mask == nullptr) {
1678     return false; // operand unboxing failed
1679   }
1680 
1681   Node* blend = gvn().transform(new VectorBlendNode(v1, v2, mask));
1682 
1683   Node* box = box_vector(blend, vbox_type, elem_bt, num_elem);
1684   set_result(box);
1685   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
1686   return true;
1687 }
1688 
1689 //  public static
1690 //  <V extends Vector<E>,
1691 //   M extends VectorMask<E>,
1692 //   E>
1693 //  M compare(int cond, Class<? extends V> vectorClass, Class<M> maskClass, Class<E> elementType, int vlen,
1694 //            V v1, V v2, M m,
1695 //            VectorCompareOp<V,M> defaultImpl)
1696 bool LibraryCallKit::inline_vector_compare() {
1697   const TypeInt*     cond         = gvn().type(argument(0))->isa_int();
1698   const TypeInstPtr* vector_klass = gvn().type(argument(1))->isa_instptr();
1699   const TypeInstPtr* mask_klass   = gvn().type(argument(2))->isa_instptr();
1700   const TypeInstPtr* elem_klass   = gvn().type(argument(3))->isa_instptr();
1701   const TypeInt*     vlen         = gvn().type(argument(4))->isa_int();
1702 
1703   if (cond == nullptr || vector_klass == nullptr || mask_klass == nullptr || elem_klass == nullptr || vlen == nullptr) {
1704     return false; // dead code
1705   }
1706   if (!cond->is_con() || vector_klass->const_oop() == nullptr || mask_klass->const_oop() == nullptr ||
1707       elem_klass->const_oop() == nullptr || !vlen->is_con()) {
1708     log_if_needed("  ** missing constant: cond=%s vclass=%s mclass=%s etype=%s vlen=%s",
1709                     NodeClassNames[argument(0)->Opcode()],
1710                     NodeClassNames[argument(1)->Opcode()],
1711                     NodeClassNames[argument(2)->Opcode()],
1712                     NodeClassNames[argument(3)->Opcode()],
1713                     NodeClassNames[argument(4)->Opcode()]);
1714     return false; // not enough info for intrinsification
1715   }
1716   if (!is_klass_initialized(vector_klass) || !is_klass_initialized(mask_klass)) {
1717     log_if_needed("  ** klass argument not initialized");
1718     return false;
1719   }
1720   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
1721   if (!elem_type->is_primitive_type()) {
1722     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
1723     return false; // should be primitive type
1724   }
1725 
1726   int num_elem = vlen->get_con();
1727   BasicType elem_bt = elem_type->basic_type();
1728   BasicType mask_bt = elem_bt;
1729 
1730   if ((cond->get_con() & BoolTest::unsigned_compare) != 0) {
1731     if (!Matcher::supports_vector_comparison_unsigned(num_elem, elem_bt)) {
1732       log_if_needed("  ** not supported: unsigned comparison op=comp/%d vlen=%d etype=%s ismask=usestore",
1733                       cond->get_con() & (BoolTest::unsigned_compare - 1), num_elem, type2name(elem_bt));
1734       return false;
1735     }
1736   }
1737 
1738   if (!arch_supports_vector(Op_VectorMaskCmp, num_elem, elem_bt, VecMaskUseStore)) {
1739     log_if_needed("  ** not supported: arity=2 op=comp/%d vlen=%d etype=%s ismask=usestore",
1740                     cond->get_con(), num_elem, type2name(elem_bt));
1741     return false;
1742   }
1743 
1744   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
1745   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
1746 
1747   ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
1748   const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
1749 
1750   Node* v1 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem);
1751   Node* v2 = unbox_vector(argument(6), vbox_type, elem_bt, num_elem);
1752 
1753   bool is_masked_op = argument(7)->bottom_type() != TypePtr::NULL_PTR;
1754   Node* mask = is_masked_op ? unbox_vector(argument(7), mbox_type, elem_bt, num_elem) : nullptr;
1755   if (is_masked_op && mask == nullptr) {
1756     log_if_needed("  ** not supported: mask = null arity=2 op=comp/%d vlen=%d etype=%s ismask=usestore is_masked_op=1",
1757                     cond->get_con(), num_elem, type2name(elem_bt));
1758     return false;
1759   }
1760 
1761   bool use_predicate = is_masked_op && arch_supports_vector(Op_VectorMaskCmp, num_elem, elem_bt, VecMaskUsePred);
1762   if (is_masked_op && !use_predicate && !arch_supports_vector(Op_AndV, num_elem, elem_bt, VecMaskUseLoad)) {
1763     log_if_needed("  ** not supported: arity=2 op=comp/%d vlen=%d etype=%s ismask=usestore is_masked_op=1",
1764                     cond->get_con(), num_elem, type2name(elem_bt));
1765     return false;
1766   }
1767 
1768   if (v1 == nullptr || v2 == nullptr) {
1769     return false; // operand unboxing failed
1770   }
1771   BoolTest::mask pred = (BoolTest::mask)cond->get_con();
1772   ConINode* pred_node = (ConINode*)gvn().makecon(cond);
1773 
1774   const TypeVect* vmask_type = TypeVect::makemask(mask_bt, num_elem);
1775   Node* operation = new VectorMaskCmpNode(pred, v1, v2, pred_node, vmask_type);
1776 
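       // For a masked compare, either attach the mask as a predicate input (when supported) or AND the
       // compare result with the mask so that inactive lanes end up unset.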
1777   if (is_masked_op) {
1778     if (use_predicate) {
1779       operation->add_req(mask);
1780       operation->add_flag(Node::Flag_is_predicated_vector);
1781     } else {
1782       operation = gvn().transform(operation);
1783       operation = VectorNode::make(Op_AndV, operation, mask, vmask_type);
1784     }
1785   }
1786 
1787   operation = gvn().transform(operation);
1788 
1789   Node* box = box_vector(operation, mbox_type, mask_bt, num_elem);
1790   set_result(box);
1791   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
1792   return true;
1793 }
1794 
1795 // public static
1796 // <V extends Vector<E>,
1797 //  Sh extends VectorShuffle<E>,
1798 //  M extends VectorMask<E>,
1799 //  E>
1800 // V rearrangeOp(Class<? extends V> vectorClass, Class<Sh> shuffleClass, Class<M> maskClass, Class<E> elementType, int vlen,
1801 //               V v1, Sh sh, M m,
1802 //               VectorRearrangeOp<V, Sh, M, E> defaultImpl)
1803 bool LibraryCallKit::inline_vector_rearrange() {
1804   const TypeInstPtr* vector_klass  = gvn().type(argument(0))->isa_instptr();
1805   const TypeInstPtr* shuffle_klass = gvn().type(argument(1))->isa_instptr();
1806   const TypeInstPtr* mask_klass    = gvn().type(argument(2))->isa_instptr();
1807   const TypeInstPtr* elem_klass    = gvn().type(argument(3))->isa_instptr();
1808   const TypeInt*     vlen          = gvn().type(argument(4))->isa_int();
1809 
1810   if (vector_klass == nullptr || shuffle_klass == nullptr || elem_klass == nullptr || vlen == nullptr) {
1811     return false; // dead code
1812   }
1813   if (shuffle_klass->const_oop() == nullptr ||
1814       vector_klass->const_oop()  == nullptr ||
1815       elem_klass->const_oop()    == nullptr ||
1816       !vlen->is_con()) {
1817     log_if_needed("  ** missing constant: vclass=%s sclass=%s etype=%s vlen=%s",
1818                     NodeClassNames[argument(0)->Opcode()],
1819                     NodeClassNames[argument(1)->Opcode()],
1820                     NodeClassNames[argument(3)->Opcode()],
1821                     NodeClassNames[argument(4)->Opcode()]);
1822     return false; // not enough info for intrinsification
1823   }
1824   if (!is_klass_initialized(vector_klass)  ||
1825       !is_klass_initialized(shuffle_klass)) {
1826     log_if_needed("  ** klass argument not initialized");
1827     return false;
1828   }
1829   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
1830   if (!elem_type->is_primitive_type()) {
1831     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
1832     return false; // should be primitive type
1833   }
1834 
1835   BasicType elem_bt = elem_type->basic_type();
1836   BasicType shuffle_bt = elem_bt;
1837   if (shuffle_bt == T_FLOAT) {
1838     shuffle_bt = T_INT;
1839   } else if (shuffle_bt == T_DOUBLE) {
1840     shuffle_bt = T_LONG;
1841   }
1842 
1843   int num_elem = vlen->get_con();
1844   bool need_load_shuffle = Matcher::vector_rearrange_requires_load_shuffle(shuffle_bt, num_elem);
1845 
1846   if (need_load_shuffle && !arch_supports_vector(Op_VectorLoadShuffle, num_elem, shuffle_bt, VecMaskNotUsed)) {
1848     log_if_needed("  ** not supported: arity=0 op=load/shuffle vlen=%d etype=%s ismask=no",
1849                     num_elem, type2name(shuffle_bt));
1851     return false; // not supported
1852   }
1853 
1854   bool is_masked_op = argument(7)->bottom_type() != TypePtr::NULL_PTR;
1855   bool use_predicate = is_masked_op;
1856   if (is_masked_op &&
1857       (mask_klass == nullptr ||
1858        mask_klass->const_oop() == nullptr ||
1859        !is_klass_initialized(mask_klass))) {
1860     log_if_needed("  ** mask_klass argument not initialized");
         return false; // not supported
1861   }
1862   if (!arch_supports_vector(Op_AndV, num_elem, elem_bt, VecMaskNotUsed)) {
1863     log_if_needed("  ** not supported: arity=2 op=and vlen=%d etype=%s ismask=no",
1864                       num_elem, type2name(elem_bt));
1865     return false;
1866   }
1867   VectorMaskUseType checkFlags = (VectorMaskUseType)(is_masked_op ? (VecMaskUseLoad | VecMaskUsePred) : VecMaskNotUsed);
1868   if (!arch_supports_vector(Op_VectorRearrange, num_elem, elem_bt, checkFlags)) {
1869     use_predicate = false;
1870     if (!is_masked_op ||
1871         (!arch_supports_vector(Op_VectorRearrange, num_elem, elem_bt, VecMaskNotUsed) ||
1872          !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad)     ||
1873          !arch_supports_vector(Op_Replicate, num_elem, elem_bt, VecMaskNotUsed))) {
1874       log_if_needed("  ** not supported: arity=2 op=shuffle/rearrange vlen=%d etype=%s ismask=no",
1875                       num_elem, type2name(elem_bt));
1876       return false; // not supported
1877     }
1878   }
1879   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
1880   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
1881 
1882   ciKlass* shbox_klass = shuffle_klass->const_oop()->as_instance()->java_lang_Class_klass();
1883   const TypeInstPtr* shbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, shbox_klass);
1884 
1885   Node* v1 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem);
1886   Node* shuffle = unbox_vector(argument(6), shbox_type, shuffle_bt, num_elem);
1887   const TypeVect* st = TypeVect::make(shuffle_bt, num_elem);
1888 
1889   if (v1 == nullptr || shuffle == nullptr) {
1890     return false; // operand unboxing failed
1891   }
1892 
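       // Wrap the shuffle indices into [0, num_elem - 1]; since num_elem is a power of two, this is a
       // simple AND with (num_elem - 1).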
1893   assert(is_power_of_2(num_elem), "wrapping invalid");
1894   Node* wrapping_mask_elem = gvn().makecon(TypeInteger::make(num_elem - 1, num_elem - 1, Type::WidenMin, shuffle_bt == T_LONG ? T_LONG : T_INT));
1895   Node* wrapping_mask = gvn().transform(VectorNode::scalar2vector(wrapping_mask_elem, num_elem, shuffle_bt));
1896   shuffle = gvn().transform(new AndVNode(shuffle, wrapping_mask, st));
1897 
1898   Node* mask = nullptr;
1899   if (is_masked_op) {
1900     ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
1901     const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
1902     mask = unbox_vector(argument(7), mbox_type, elem_bt, num_elem);
1903     if (mask == nullptr) {
1904       log_if_needed("  ** not supported: arity=3 op=shuffle/rearrange vlen=%d etype=%s ismask=useload is_masked_op=1",
1905                       num_elem, type2name(elem_bt));
1906       return false;
1907     }
1908   }
1909 
1910   if (need_load_shuffle) {
1911     shuffle = gvn().transform(new VectorLoadShuffleNode(shuffle, st));
1912   }
1913 
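       // Emit the rearrange; for a masked operation without predicate support, blend the rearranged
       // lanes with a zero vector so that inactive lanes become zero.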
1914   Node* rearrange = new VectorRearrangeNode(v1, shuffle);
1915   if (is_masked_op) {
1916     if (use_predicate) {
1917       rearrange->add_req(mask);
1918       rearrange->add_flag(Node::Flag_is_predicated_vector);
1919     } else {
1920       rearrange = gvn().transform(rearrange);
1921       Node* zero = gvn().makecon(Type::get_zero_type(elem_bt));
1922       Node* zerovec = gvn().transform(VectorNode::scalar2vector(zero, num_elem, elem_bt));
1923       rearrange = new VectorBlendNode(zerovec, rearrange, mask);
1924     }
1925   }
1926   rearrange = gvn().transform(rearrange);
1927 
1928   Node* box = box_vector(rearrange, vbox_type, elem_bt, num_elem);
1929   set_result(box);
1930   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
1931   return true;
1932 }
1933 
1934 //    public static
1935 //    <V extends Vector<E>,
1936 //     M  extends VectorMask<E>,
1937 //     E>
1938 //    V selectFromOp(Class<? extends V> vClass, Class<M> mClass, Class<E> eClass,
1939 //                   int length, V v1, V v2, M m,
1940 //                   VectorSelectFromOp<V, M> defaultImpl)
1941 bool LibraryCallKit::inline_vector_select_from() {
1942   const TypeInstPtr* vector_klass  = gvn().type(argument(0))->isa_instptr();
1943   const TypeInstPtr* mask_klass    = gvn().type(argument(1))->isa_instptr();
1944   const TypeInstPtr* elem_klass    = gvn().type(argument(2))->isa_instptr();
1945   const TypeInt*     vlen          = gvn().type(argument(3))->isa_int();
1946 
1947   if (vector_klass == nullptr  || elem_klass == nullptr || vlen == nullptr ||
1948       vector_klass->const_oop()  == nullptr ||
1949       elem_klass->const_oop()    == nullptr ||
1950       !vlen->is_con()) {
1951     log_if_needed("  ** missing constant: vclass=%s etype=%s vlen=%s",
1952                     NodeClassNames[argument(0)->Opcode()],
1953                     NodeClassNames[argument(2)->Opcode()],
1954                     NodeClassNames[argument(3)->Opcode()]);
1955     return false; // not enough info for intrinsification
1956   }
1957   if (!is_klass_initialized(vector_klass)) {
1958     log_if_needed("  ** klass argument not initialized");
1959     return false;
1960   }
1961   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
1962   if (!elem_type->is_primitive_type()) {
1963     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
1964     return false; // should be primitive type
1965   }
1966   BasicType elem_bt = elem_type->basic_type();
1967   int num_elem = vlen->get_con();
1968   if (!is_power_of_2(num_elem)) {
1969     log_if_needed("  ** vlen not power of two=%d", num_elem);
1970     return false;
1971   }
1972 
1973   BasicType shuffle_bt = elem_bt;
1974   if (shuffle_bt == T_FLOAT) {
1975     shuffle_bt = T_INT;
1976   } else if (shuffle_bt == T_DOUBLE) {
1977     shuffle_bt = T_LONG;
1978   }
1979   bool need_load_shuffle = Matcher::vector_rearrange_requires_load_shuffle(shuffle_bt, num_elem);
1980 
1981   int cast_vopc = VectorCastNode::opcode(-1, elem_bt); // from vector of type elem_bt
1982   if ((need_load_shuffle && !arch_supports_vector(Op_VectorLoadShuffle, num_elem, elem_bt, VecMaskNotUsed)) ||
1983       (elem_bt != shuffle_bt && !arch_supports_vector(cast_vopc, num_elem, shuffle_bt, VecMaskNotUsed))     ||
1984       !arch_supports_vector(Op_AndV, num_elem, shuffle_bt, VecMaskNotUsed) ||
1985       !arch_supports_vector(Op_Replicate, num_elem, shuffle_bt, VecMaskNotUsed)) {
1986     log_if_needed("  ** not supported: arity=0 op=selectFrom vlen=%d etype=%s ismask=no",
1987                     num_elem, type2name(elem_bt));
1988     return false; // not supported
1989   }
1990 
1991   bool is_masked_op = argument(6)->bottom_type() != TypePtr::NULL_PTR;
1992   bool use_predicate = is_masked_op;
1993   if (is_masked_op &&
1994       (mask_klass == nullptr ||
1995        mask_klass->const_oop() == nullptr ||
1996        !is_klass_initialized(mask_klass))) {
1997     log_if_needed("  ** mask_klass argument not initialized");
1998     return false; // not supported
1999   }
2000   VectorMaskUseType checkFlags = (VectorMaskUseType)(is_masked_op ? (VecMaskUseLoad | VecMaskUsePred) : VecMaskNotUsed);
2001   if (!arch_supports_vector(Op_VectorRearrange, num_elem, elem_bt, checkFlags)) {
2002     use_predicate = false;
2003     if (!is_masked_op ||
2004         (!arch_supports_vector(Op_VectorRearrange, num_elem, elem_bt, VecMaskNotUsed) ||
2005          !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad)     ||
2006          !arch_supports_vector(Op_Replicate, num_elem, elem_bt, VecMaskNotUsed))) {
2007       log_if_needed("  ** not supported: op=selectFrom vlen=%d etype=%s is_masked_op=%d",
2008                       num_elem, type2name(elem_bt), is_masked_op);
2009       return false; // not supported
2010     }
2011   }
2012   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
2013   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
2014 
2015   // v1 is the index vector
2016   Node* v1 = unbox_vector(argument(4), vbox_type, elem_bt, num_elem);
2017   // v2 is the vector being rearranged
2018   Node* v2 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem);
2019 
2020   if (v1 == nullptr) {
2021     log_if_needed("  ** unbox failed v1=%s", NodeClassNames[argument(4)->Opcode()]);
2022     return false; // operand unboxing failed
2023   }
2024 
2025   if (v2 == nullptr) {
2026     log_if_needed("  ** unbox failed v2=%s", NodeClassNames[argument(5)->Opcode()]);
2027     return false; // operand unboxing failed
2028   }
2029 
2030   Node* mask = nullptr;
2031   if (is_masked_op) {
2032     ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
2033     const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
2034     mask = unbox_vector(argument(6), mbox_type, elem_bt, num_elem);
2035     if (mask == nullptr) {
2036       log_if_needed("  ** unbox failed mask=%s", NodeClassNames[argument(6)->Opcode()]);
2037       return false;
2038     }
2039   }
2040 
2041   // Cast the index vector from an elem_bt vector to a shuffle_bt vector when the two types differ.
2042   const TypeVect* shuffle_vt = TypeVect::make(shuffle_bt, num_elem);
2043   Node* shuffle = v1;
2044 
2045   if (shuffle_bt != elem_bt) {
2046     shuffle = gvn().transform(VectorCastNode::make(cast_vopc, v1, shuffle_bt, num_elem));
2047   }
2048 
2049   // Wrap the index lanes into the range [0, num_elem - 1] to form the shuffle vector. Since we only
2050   // get here for power-of-two vector lengths, this is a simple AND with (num_elem - 1).
2051   Node* mod_val = gvn().makecon(TypeInteger::make(num_elem - 1, num_elem - 1, Type::WidenMin, shuffle_bt == T_LONG ? T_LONG : T_INT));
2052   Node* bcast_mod = gvn().transform(VectorNode::scalar2vector(mod_val, num_elem, shuffle_bt));
2053   shuffle = gvn().transform(VectorNode::make(Op_AndV, shuffle, bcast_mod, shuffle_vt));
2054 
2055   // load the shuffle to use in rearrange
2056   if (need_load_shuffle) {
2057     shuffle = gvn().transform(new VectorLoadShuffleNode(shuffle, shuffle_vt));
2058   }
2059 
2060   // and finally rearrange
2061   Node* rearrange = new VectorRearrangeNode(v2, shuffle);
2062   if (is_masked_op) {
2063     if (use_predicate) {
2064       // masked rearrange is supported so use that directly
2065       rearrange->add_req(mask);
2066       rearrange->add_flag(Node::Flag_is_predicated_vector);
2067     } else {
2068       // masked rearrange is not supported, so emulate it using blend
2069       const TypeVect* vt = v1->bottom_type()->is_vect();
2070       rearrange = gvn().transform(rearrange);
2071 
2072       // create a zero vector with each lane element set as zero
2073       Node* zero = gvn().makecon(Type::get_zero_type(elem_bt));
2074       Node* zerovec = gvn().transform(VectorNode::scalar2vector(zero, num_elem, elem_bt));
2075 
2076       // For each lane for which mask is set, blend in the rearranged lane into zero vector
2077       rearrange = new VectorBlendNode(zerovec, rearrange, mask);
2078     }
2079   }
2080   rearrange = gvn().transform(rearrange);
2081 
2082   // box the result
2083   Node* box = box_vector(rearrange, vbox_type, elem_bt, num_elem);
2084   set_result(box);
2085 
2086   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
2087   return true;
2088 }
2089 
2090 //  public static
2091 //  <V extends Vector<E>,
2092 //   M extends VectorMask<E>,
2093 //   E>
2094 //  V broadcastInt(int opr, Class<? extends V> vectorClass, Class<? extends M> maskClass,
2095 //                 Class<E> elementType, int length,
2096 //                 V v, int n, M m,
2097 //                 VectorBroadcastIntOp<V, M> defaultImpl)
2098 bool LibraryCallKit::inline_vector_broadcast_int() {
2099   const TypeInt*     opr          = gvn().type(argument(0))->isa_int();
2100   const TypeInstPtr* vector_klass = gvn().type(argument(1))->isa_instptr();
2101   const TypeInstPtr* mask_klass   = gvn().type(argument(2))->isa_instptr();
2102   const TypeInstPtr* elem_klass   = gvn().type(argument(3))->isa_instptr();
2103   const TypeInt*     vlen         = gvn().type(argument(4))->isa_int();
2104 
2105   if (opr == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr) {
2106     return false; // dead code
2107   }
2108   if (!opr->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) {
2109     log_if_needed("  ** missing constant: opr=%s vclass=%s etype=%s vlen=%s",
2110                     NodeClassNames[argument(0)->Opcode()],
2111                     NodeClassNames[argument(1)->Opcode()],
2112                     NodeClassNames[argument(3)->Opcode()],
2113                     NodeClassNames[argument(4)->Opcode()]);
2114     return false; // not enough info for intrinsification
2115   }
2116   if (!is_klass_initialized(vector_klass)) {
2117     log_if_needed("  ** klass argument not initialized");
2118     return false;
2119   }
2120 
2121   const Type* vmask_type = gvn().type(argument(7));
2122   bool is_masked_op = vmask_type != TypePtr::NULL_PTR;
2123   if (is_masked_op) {
2124     if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) {
2125       log_if_needed("  ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]);
2126       return false; // not enough info for intrinsification
2127     }
2128 
2129     if (!is_klass_initialized(mask_klass)) {
2130       log_if_needed("  ** mask klass argument not initialized");
2131       return false;
2132     }
2133 
2134     if (vmask_type->maybe_null()) {
2135       log_if_needed("  ** null mask values are not allowed for masked op");
2136       return false;
2137     }
2138   }
2139 
2140   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
2141   if (!elem_type->is_primitive_type()) {
2142     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
2143     return false; // should be primitive type
2144   }
2145 
2146   int num_elem = vlen->get_con();
2147   BasicType elem_bt = elem_type->basic_type();
2148   int opc = VectorSupport::vop2ideal(opr->get_con(), elem_bt);
2149 
2150   bool is_shift  = VectorNode::is_shift_opcode(opc);
2151   bool is_rotate = VectorNode::is_rotate_opcode(opc);
2152 
2153   if (opc == 0 || (!is_shift && !is_rotate)) {
2154     log_if_needed("  ** operation not supported: op=%d bt=%s", opr->get_con(), type2name(elem_bt));
2155     return false; // operation not supported
2156   }
2157 
2158   int sopc = VectorNode::opcode(opc, elem_bt);
2159   if (sopc == 0) {
2160     log_if_needed("  ** operation not supported: opc=%s bt=%s", NodeClassNames[opc], type2name(elem_bt));
2161     return false; // operation not supported
2162   }
2163 
2164   Node* cnt  = argument(6);
2165   const TypeInt* cnt_type = cnt->bottom_type()->isa_int();
2166 
2167   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
2168   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
2169 
2170   // If the CPU supports vector rotate instructions with a constant count, pass the constant through directly.
2171   bool is_const_rotate = is_rotate && cnt_type && cnt_type->is_con() &&
2172                          Matcher::supports_vector_constant_rotates(cnt_type->get_con());
2173   bool has_scalar_args = is_rotate ? !is_const_rotate : true;
2174 
2175   VectorMaskUseType checkFlags = (VectorMaskUseType)(is_masked_op ? (VecMaskUseLoad | VecMaskUsePred) : VecMaskNotUsed);
2176   bool use_predicate = is_masked_op;
2177 
2178   if (!arch_supports_vector(sopc, num_elem, elem_bt, checkFlags, has_scalar_args)) {
2179     use_predicate = false;
2180     if (!is_masked_op ||
2181         (!arch_supports_vector(sopc, num_elem, elem_bt, VecMaskNotUsed, has_scalar_args) ||
2182          !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad))) {
2183 
2184       log_if_needed("  ** not supported: arity=0 op=int/%d vlen=%d etype=%s is_masked_op=%d",
2185                       sopc, num_elem, type2name(elem_bt), is_masked_op ? 1 : 0);
2186       return false; // not supported
2187     }
2188   }
2189 
2190   Node* opd1 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem);
2191   Node* opd2 = nullptr;
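       // A shift count is wrapped in a dedicated shift-count node; a non-constant rotate count is
       // broadcast to a vector (widened to long for T_LONG lanes), while a constant rotate count is
       // passed through unchanged.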
2192   if (is_shift) {
2193     opd2 = vector_shift_count(cnt, opc, elem_bt, num_elem);
2194   } else {
2195     assert(is_rotate, "unexpected operation");
2196     if (!is_const_rotate) {
2197       cnt = elem_bt == T_LONG ? gvn().transform(new ConvI2LNode(cnt)) : cnt;
2198       opd2 = gvn().transform(VectorNode::scalar2vector(cnt, num_elem, elem_bt));
2199     } else {
2200       // Constant rotate count.
2201       opd2 = cnt;
2202     }
2203   }
2204 
2205   if (opd1 == nullptr || opd2 == nullptr) {
2206     return false;
2207   }
2208 
2209   Node* mask = nullptr;
2210   if (is_masked_op) {
2211     ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
2212     const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
2213     mask = unbox_vector(argument(7), mbox_type, elem_bt, num_elem);
2214     if (mask == nullptr) {
2215       log_if_needed("  ** unbox failed mask=%s", NodeClassNames[argument(7)->Opcode()]);
2216       return false;
2217     }
2218   }
2219 
2220   Node* operation = VectorNode::make(opc, opd1, opd2, num_elem, elem_bt);
2221   if (is_masked_op && mask != nullptr) {
2222     if (use_predicate) {
2223       operation->add_req(mask);
2224       operation->add_flag(Node::Flag_is_predicated_vector);
2225     } else {
2226       operation = gvn().transform(operation);
2227       operation = new VectorBlendNode(opd1, operation, mask);
2228     }
2229   }
2230   operation = gvn().transform(operation);
2231   Node* vbox = box_vector(operation, vbox_type, elem_bt, num_elem);
2232   set_result(vbox);
2233   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
2234   return true;
2235 }
2236 
2237 // public static <VOUT extends VectorPayload,
2238 //                 VIN extends VectorPayload,
2239 //                   S extends VectorSpecies>
2240 // VOUT convert(int oprId,
2241 //           Class<?> fromVectorClass, Class<?> fromElementType, int fromVLen,
2242 //           Class<?>   toVectorClass, Class<?>   toElementType, int   toVLen,
2243 //           VIN v, S s,
2244 //           VectorConvertOp<VOUT, VIN, S> defaultImpl)
2245 //
2246 bool LibraryCallKit::inline_vector_convert() {
2247   const TypeInt*     opr               = gvn().type(argument(0))->isa_int();
2248 
2249   const TypeInstPtr* vector_klass_from = gvn().type(argument(1))->isa_instptr();
2250   const TypeInstPtr* elem_klass_from   = gvn().type(argument(2))->isa_instptr();
2251   const TypeInt*     vlen_from         = gvn().type(argument(3))->isa_int();
2252 
2253   const TypeInstPtr* vector_klass_to   = gvn().type(argument(4))->isa_instptr();
2254   const TypeInstPtr* elem_klass_to     = gvn().type(argument(5))->isa_instptr();
2255   const TypeInt*     vlen_to           = gvn().type(argument(6))->isa_int();
2256 
2257   if (opr == nullptr ||
2258       vector_klass_from == nullptr || elem_klass_from == nullptr || vlen_from == nullptr ||
2259       vector_klass_to   == nullptr || elem_klass_to   == nullptr || vlen_to   == nullptr) {
2260     return false; // dead code
2261   }
2262   if (!opr->is_con() ||
2263       vector_klass_from->const_oop() == nullptr || elem_klass_from->const_oop() == nullptr || !vlen_from->is_con() ||
2264       vector_klass_to->const_oop() == nullptr || elem_klass_to->const_oop() == nullptr || !vlen_to->is_con()) {
2265     log_if_needed("  ** missing constant: opr=%s vclass_from=%s etype_from=%s vlen_from=%s vclass_to=%s etype_to=%s vlen_to=%s",
2266                     NodeClassNames[argument(0)->Opcode()],
2267                     NodeClassNames[argument(1)->Opcode()],
2268                     NodeClassNames[argument(2)->Opcode()],
2269                     NodeClassNames[argument(3)->Opcode()],
2270                     NodeClassNames[argument(4)->Opcode()],
2271                     NodeClassNames[argument(5)->Opcode()],
2272                     NodeClassNames[argument(6)->Opcode()]);
2273     return false; // not enough info for intrinsification
2274   }
2275   if (!is_klass_initialized(vector_klass_from) || !is_klass_initialized(vector_klass_to)) {
2276     log_if_needed("  ** klass argument not initialized");
2277     return false;
2278   }
2279 
2280   assert(opr->get_con() == VectorSupport::VECTOR_OP_CAST  ||
2281          opr->get_con() == VectorSupport::VECTOR_OP_UCAST ||
2282          opr->get_con() == VectorSupport::VECTOR_OP_REINTERPRET, "wrong opcode");
2283   bool is_cast = (opr->get_con() == VectorSupport::VECTOR_OP_CAST || opr->get_con() == VectorSupport::VECTOR_OP_UCAST);
2284   bool is_ucast = (opr->get_con() == VectorSupport::VECTOR_OP_UCAST);
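       // VECTOR_OP_CAST/UCAST convert lane values (signed vs. unsigned widening); VECTOR_OP_REINTERPRET
       // reuses the raw payload bits without any value conversion.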
2285 
2286   ciKlass* vbox_klass_from = vector_klass_from->const_oop()->as_instance()->java_lang_Class_klass();
2287   ciKlass* vbox_klass_to = vector_klass_to->const_oop()->as_instance()->java_lang_Class_klass();
2288 
2289   bool is_mask = is_vector_mask(vbox_klass_from);
2290 
2291   ciType* elem_type_from = elem_klass_from->const_oop()->as_instance()->java_mirror_type();
2292   if (!elem_type_from->is_primitive_type()) {
2293     return false; // should be primitive type
2294   }
2295   BasicType elem_bt_from = elem_type_from->basic_type();
2296   ciType* elem_type_to = elem_klass_to->const_oop()->as_instance()->java_mirror_type();
2297   if (!elem_type_to->is_primitive_type()) {
2298     return false; // should be primitive type
2299   }
2300   BasicType elem_bt_to = elem_type_to->basic_type();
2301 
2302   int num_elem_from = vlen_from->get_con();
2303   int num_elem_to = vlen_to->get_con();
2304 
2305   // Check whether we can unbox to appropriate size. Even with casting, checking for reinterpret is needed
2306   // since we may need to change size.
2307   if (!arch_supports_vector(Op_VectorReinterpret,
2308                             num_elem_from,
2309                             elem_bt_from,
2310                             is_mask ? VecMaskUseAll : VecMaskNotUsed)) {
2311     log_if_needed("  ** not supported: arity=1 op=%s/1 vlen1=%d etype1=%s ismask=%d",
2312                     is_cast ? "cast" : "reinterpret",
2313                     num_elem_from, type2name(elem_bt_from), is_mask);
2314     return false;
2315   }
2316 
2317   // Check whether we can support resizing/reinterpreting to the new size.
2318   if (!arch_supports_vector(Op_VectorReinterpret,
2319                             num_elem_to,
2320                             elem_bt_to,
2321                             is_mask ? VecMaskUseAll : VecMaskNotUsed)) {
2322     log_if_needed("  ** not supported: arity=1 op=%s/2 vlen2=%d etype2=%s ismask=%d",
2323                     is_cast ? "cast" : "reinterpret",
2324                     num_elem_to, type2name(elem_bt_to), is_mask);
2325     return false;
2326   }
2327 
2328   // At this point, we know that both input and output vector registers are supported
2329   // by the architecture. Next, check whether the cast is to the same element type, which
2330   // means it is actually a resize and not a cast.
2331   if (is_cast && elem_bt_from == elem_bt_to) {
2332     is_cast = false;
2333   }
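       // (For example, a VECTOR_OP_CAST between two species with the same element type but
       //  different lengths is handled below as a pure resize.)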
2334 
2335   const TypeInstPtr* vbox_type_from = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass_from);
2336 
2337   Node* opd1 = unbox_vector(argument(7), vbox_type_from, elem_bt_from, num_elem_from);
2338   if (opd1 == nullptr) {
2339     return false;
2340   }
2341 
2342   const TypeVect* src_type = TypeVect::make(elem_bt_from, num_elem_from, is_mask);
2343   const TypeVect* dst_type = TypeVect::make(elem_bt_to, num_elem_to, is_mask);
2344 
2345   // Safety check to prevent casting when the source mask is carried in a vector
2346   // register and the destination mask in a predicate register, or vice versa.
2347   // From an x86 standpoint, this case only arises on the KNL target, where certain
2348   // masks (depending on the species) are propagated through either a vector or a
2349   // predicate register.
2350   if (is_mask &&
2351       ((src_type->isa_vectmask() == nullptr && dst_type->isa_vectmask()) ||
2352        (dst_type->isa_vectmask() == nullptr && src_type->isa_vectmask()))) {
2353     return false;
2354   }
2355 
2356   Node* op = opd1;
2357   if (is_cast) {
2358     assert(!is_mask || num_elem_from == num_elem_to, "vector mask cast needs the same elem num");
2359     int cast_vopc = VectorCastNode::opcode(-1, elem_bt_from, !is_ucast);
2360 
2361     // Make sure that the vector cast is implemented for this particular type/size
2362     // combination if it is not a mask cast.
2363     if (!is_mask && !arch_supports_vector(cast_vopc, num_elem_to, elem_bt_to, VecMaskNotUsed)) {
2364       log_if_needed("  ** not supported: arity=1 op=cast#%d/3 vlen2=%d etype2=%s ismask=%d",
2365                       cast_vopc, num_elem_to, type2name(elem_bt_to), is_mask);
2366       return false;
2367     }
2368 
2369     if (num_elem_from < num_elem_to) {
2370       // Since the input and output element counts differ, the result must be resized as well.
2371       // First make a cast that retains the number of elements from the source.
2372       int num_elem_for_cast = num_elem_from;
2373 
2374       // It is possible that arch does not support this intermediate vector size
2375       // TODO More complex logic required here to handle this corner case for the sizes.
2376       if (!arch_supports_vector(cast_vopc, num_elem_for_cast, elem_bt_to, VecMaskNotUsed)) {
2377         log_if_needed("  ** not supported: arity=1 op=cast#%d/4 vlen1=%d etype2=%s ismask=%d",
2378                         cast_vopc,
2379                         num_elem_for_cast, type2name(elem_bt_to), is_mask);
2380         return false;
2381       }
2382 
2383       op = gvn().transform(VectorCastNode::make(cast_vopc, op, elem_bt_to, num_elem_for_cast));
2384       // Now reinterpret the intermediate result up to the destination size.
2385       op = gvn().transform(new VectorReinterpretNode(op, op->bottom_type()->is_vect(), dst_type));
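           // Illustrative example: casting a 4-lane source into an 8-lane destination first
           // produces a 4-lane vector of the destination element type, then reinterprets it
           // up to the 8-lane size.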
2386     } else if (num_elem_from > num_elem_to) {
2387       // Since the input has more elements than the output, simply shrink the input first
2388       // (the top elements are dropped anyway) and then cast.
2389       int num_elem_for_resize = num_elem_to;
2390 
2391       // It is possible that arch does not support this intermediate vector size
2392       // TODO More complex logic required here to handle this corner case for the sizes.
2393       if (!arch_supports_vector(Op_VectorReinterpret,
2394                                 num_elem_for_resize,
2395                                 elem_bt_from,
2396                                 VecMaskNotUsed)) {
2397         log_if_needed("  ** not supported: arity=1 op=cast/5 vlen2=%d etype1=%s ismask=%d",
2398                         num_elem_for_resize, type2name(elem_bt_from), is_mask);
2399         return false;
2400       }
2401 
2402       const TypeVect* resize_type = TypeVect::make(elem_bt_from, num_elem_for_resize);
2403       op = gvn().transform(new VectorReinterpretNode(op, src_type, resize_type));
2404       op = gvn().transform(VectorCastNode::make(cast_vopc, op, elem_bt_to, num_elem_to));
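           // Illustrative example: casting an 8-lane source into a 4-lane destination first
           // reinterprets the source down to 4 lanes, then casts those lanes to the
           // destination element type.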
2405     } else { // num_elem_from == num_elem_to
2406       if (is_mask) {
2407         // Make sure that the cast for a vector mask is implemented for this particular type/size combination.
2408         if (!arch_supports_vector(Op_VectorMaskCast, num_elem_to, elem_bt_to, VecMaskNotUsed)) {
2409           log_if_needed("  ** not supported: arity=1 op=maskcast vlen2=%d etype2=%s ismask=%d",
2410                           num_elem_to, type2name(elem_bt_to), is_mask);
2411           return false;
2412         }
2413         op = gvn().transform(new VectorMaskCastNode(op, dst_type));
2414       } else {
2415         // Since the input and output element counts match, and this vector size is known to be
2416         // supported, simply do the cast with no resize needed.
2417         op = gvn().transform(VectorCastNode::make(cast_vopc, op, elem_bt_to, num_elem_to));
2418       }
2419     }
2420   } else if (!Type::equals(src_type, dst_type)) {
2421     assert(!is_cast, "must be reinterpret");
2422     op = gvn().transform(new VectorReinterpretNode(op, src_type, dst_type));
2423   }
2424 
2425   const TypeInstPtr* vbox_type_to = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass_to);
2426   Node* vbox = box_vector(op, vbox_type_to, elem_bt_to, num_elem_to);
2427   set_result(vbox);
2428   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem_to * type2aelembytes(elem_bt_to))));
2429   return true;
2430 }
2431 
2432 //  public static
2433 //  <V extends Vector<E>,
2434 //   E>
2435 //  V insert(Class<? extends V> vectorClass, Class<E> elementType, int vlen,
2436 //           V vec, int ix, long val,
2437 //           VecInsertOp<V> defaultImpl)
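     // (At the Java level this intrinsic backs, for example, IntVector::withLane, which
     //  returns a copy of the vector with a single lane replaced.)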
2438 bool LibraryCallKit::inline_vector_insert() {
2439   const TypeInstPtr* vector_klass = gvn().type(argument(0))->isa_instptr();
2440   const TypeInstPtr* elem_klass   = gvn().type(argument(1))->isa_instptr();
2441   const TypeInt*     vlen         = gvn().type(argument(2))->isa_int();
2442   const TypeInt*     idx          = gvn().type(argument(4))->isa_int();
2443 
2444   if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || idx == nullptr) {
2445     return false; // dead code
2446   }
2447   if (vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con() || !idx->is_con()) {
2448     log_if_needed("  ** missing constant: vclass=%s etype=%s vlen=%s idx=%s",
2449                     NodeClassNames[argument(0)->Opcode()],
2450                     NodeClassNames[argument(1)->Opcode()],
2451                     NodeClassNames[argument(2)->Opcode()],
2452                     NodeClassNames[argument(4)->Opcode()]);
2453     return false; // not enough info for intrinsification
2454   }
2455   if (!is_klass_initialized(vector_klass)) {
2456     log_if_needed("  ** klass argument not initialized");
2457     return false;
2458   }
2459   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
2460   if (!elem_type->is_primitive_type()) {
2461     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
2462     return false; // should be primitive type
2463   }
2464   BasicType elem_bt = elem_type->basic_type();
2465   int num_elem = vlen->get_con();
2466   if (!arch_supports_vector(Op_VectorInsert, num_elem, elem_bt, VecMaskNotUsed)) {
2467     log_if_needed("  ** not supported: arity=1 op=insert vlen=%d etype=%s ismask=no",
2468                     num_elem, type2name(elem_bt));
2469     return false; // not supported
2470   }
2471 
2472   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
2473   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
2474 
2475   Node* opd = unbox_vector(argument(3), vbox_type, elem_bt, num_elem);
2476   if (opd == nullptr) {
2477     return false;
2478   }
2479 
2480   Node* insert_val = argument(5);
2481   assert(gvn().type(insert_val)->isa_long() != nullptr, "expected to be long");
2482 
2483   // Convert the insert value from a long back to the element's type.
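       // Illustrative example: for a float element the Java-level caller passes the lane's
       // raw int bits widened to a long, so the value is narrowed with ConvL2I and then
       // moved into an FP register with MoveI2F.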
2484   switch (elem_bt) {
2485     case T_BYTE:
2486       insert_val = gvn().transform(new ConvL2INode(insert_val, TypeInt::BYTE));
2487       break;
2488     case T_SHORT:
2489       insert_val = gvn().transform(new ConvL2INode(insert_val, TypeInt::SHORT));
2490       break;
2491     case T_INT:
2492       insert_val = gvn().transform(new ConvL2INode(insert_val));
2493       break;
2494     case T_FLOAT:
2495       insert_val = gvn().transform(new ConvL2INode(insert_val));
2496       insert_val = gvn().transform(new MoveI2FNode(insert_val));
2497       break;
2498     case T_DOUBLE:
2499       insert_val = gvn().transform(new MoveL2DNode(insert_val));
2500       break;
2501     case T_LONG:
2502       // no conversion needed
2503       break;
2504     default: fatal("%s", type2name(elem_bt)); break;
2505   }
2506 
2507   Node* operation = gvn().transform(VectorInsertNode::make(opd, insert_val, idx->get_con(), gvn()));
2508 
2509   Node* vbox = box_vector(operation, vbox_type, elem_bt, num_elem);
2510   set_result(vbox);
2511   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
2512   return true;
2513 }
2514 
2515 //  public static
2516 //  <VM extends VectorPayload,
2517 //   E>
2518 //  long extract(Class<? extends VM> vClass, Class<E> eClass,
2519 //               int length,
2520 //               VM vm, int i,
2521 //               VecExtractOp<VM> defaultImpl)
2522 bool LibraryCallKit::inline_vector_extract() {
2523   const TypeInstPtr* vector_klass = gvn().type(argument(0))->isa_instptr();
2524   const TypeInstPtr* elem_klass   = gvn().type(argument(1))->isa_instptr();
2525   const TypeInt*     vlen         = gvn().type(argument(2))->isa_int();
2526   const TypeInt*     idx          = gvn().type(argument(4))->isa_int();
2527 
2528   if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || idx == nullptr) {
2529     return false; // dead code
2530   }
2531   if (vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) {
2532     log_if_needed("  ** missing constant: vclass=%s etype=%s vlen=%s",
2533                     NodeClassNames[argument(0)->Opcode()],
2534                     NodeClassNames[argument(1)->Opcode()],
2535                     NodeClassNames[argument(2)->Opcode()]);
2536     return false; // not enough info for intrinsification
2537   }
2538   if (!is_klass_initialized(vector_klass)) {
2539     log_if_needed("  ** klass argument not initialized");
2540     return false;
2541   }
2542   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
2543   if (!elem_type->is_primitive_type()) {
2544     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
2545     return false; // should be primitive type
2546   }
2547   BasicType elem_bt = elem_type->basic_type();
2548   int num_elem = vlen->get_con();
2549 
2550   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
2551   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
2552 
2553   Node* opd = nullptr;
2554 
2555   if (is_vector_mask(vbox_klass)) {
2556     // vbox_klass is a mask class. This path implements VectorMask.laneIsSet(int).
2557 
2558     Node* pos = argument(4); // can be variable
2559     if (arch_supports_vector(Op_ExtractUB, num_elem, elem_bt, VecMaskUseAll)) {
2560       // Transform the mask into a vector of booleans and extract the lane with an ExtractUB node.
2561       opd = unbox_vector(argument(3), vbox_type, elem_bt, num_elem);
2562       if (opd == nullptr) {
2563         return false;
2564       }
2565       opd = gvn().transform(VectorStoreMaskNode::make(gvn(), opd, elem_bt, num_elem));
2566       opd = gvn().transform(new ExtractUBNode(opd, pos));
2567       opd = gvn().transform(new ConvI2LNode(opd));
2568     } else if (arch_supports_vector(Op_VectorMaskToLong, num_elem, elem_bt, VecMaskUseLoad)) {
2569       opd = unbox_vector(argument(3), vbox_type, elem_bt, num_elem);
2570       if (opd == nullptr) {
2571         return false;
2572       }
2573       // VectorMaskToLongNode requires the input to be either a mask or a vector of BOOLEAN type.
2574       if (opd->bottom_type()->isa_vectmask() == nullptr) {
2575         opd = gvn().transform(VectorStoreMaskNode::make(gvn(), opd, elem_bt, num_elem));
2576       }
2577       // (toLong() >>> pos) & 1L
2578       opd = gvn().transform(new VectorMaskToLongNode(opd, TypeLong::LONG));
2579       opd = gvn().transform(new URShiftLNode(opd, pos));
2580       opd = gvn().transform(new AndLNode(opd, gvn().makecon(TypeLong::ONE)));
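           // Illustrative example: for a 4-lane mask {T, F, T, F}, toLong() == 0b0101, so
           // laneIsSet(2) computes (0b0101 >>> 2) & 1 == 1.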
2581     } else {
2582       log_if_needed("  ** Rejected mask extraction because architecture does not support it");
2583       return false; // not supported
2584     }
2585   } else {
2586     // vbox_klass is a vector class. This path implements Vector.lane(int).
2587     if (!idx->is_con()) {
2588       log_if_needed("  ** missing constant: idx=%s", NodeClassNames[argument(4)->Opcode()]);
2589       return false; // not enough info for intrinsification
2590     }
2591 
2592     int vopc = ExtractNode::opcode(elem_bt);
2593     if (!arch_supports_vector(vopc, num_elem, elem_bt, VecMaskNotUsed)) {
2594       log_if_needed("  ** not supported: arity=1 op=extract vlen=%d etype=%s ismask=no",
2595                       num_elem, type2name(elem_bt));
2596       return false; // not supported
2597     }
2598 
2599     opd = unbox_vector(argument(3), vbox_type, elem_bt, num_elem);
2600     if (opd == nullptr) {
2601       return false;
2602     }
2603     ConINode* idx_con = gvn().intcon(idx->get_con())->as_ConI();
2604 
2605     opd = gvn().transform(ExtractNode::make(opd, idx_con, elem_bt));
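         // Widen or bit-move the extracted lane to a long, mirroring (in reverse) the
         // conversions done in inline_vector_insert (e.g. a double lane is returned as its
         // raw bits via MoveD2L).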
2606     switch (elem_bt) {
2607       case T_BYTE:
2608       case T_SHORT:
2609       case T_INT: {
2610         opd = gvn().transform(new ConvI2LNode(opd));
2611         break;
2612       }
2613       case T_FLOAT: {
2614         opd = gvn().transform(new MoveF2INode(opd));
2615         opd = gvn().transform(new ConvI2LNode(opd));
2616         break;
2617       }
2618       case T_DOUBLE: {
2619         opd = gvn().transform(new MoveD2LNode(opd));
2620         break;
2621       }
2622       case T_LONG: {
2623         // no conversion needed
2624         break;
2625       }
2626       default: fatal("%s", type2name(elem_bt));
2627     }
2628   }
2629   set_result(opd);
2630   return true;
2631 }
2632 
2633 static Node* LowerSelectFromTwoVectorOperation(PhaseGVN& phase, Node* index_vec, Node* src1, Node* src2, const TypeVect* vt) {
2634   int num_elem = vt->length();
2635   BasicType elem_bt = vt->element_basic_type();
2636 
2637   // Lower the selectFrom operation into its constituent operations.
2638   //   SelectFromTwoVectorNode =
2639   //     (VectorBlend
2640   //        (VectorRearrange SRC1 (WRAPPED_INDEX AND (VLEN-1)))
2641   //        (VectorRearrange SRC2 (WRAPPED_INDEX AND (VLEN-1)))
2642   //        MASK)
2643   // Where
2644   //   WRAPPED_INDEX is computed by wrapping the incoming indexes
2645   //   into the two-vector index range [0, 2 * VLEN) and
2646   //   MASK = WRAPPED_INDEX < VLEN
2647   //
2648   // IR lowering prevents intrinsification failure and associated argument
2649   // boxing penalties.
2650   //
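       // Illustrative example (a sketch assuming VLEN = 4):
       //   index_vec               = {1, 6, 3, 4}
       //   MASK (index < 4)        = {T, F, T, F}
       //   rearrange index (AND 3) = {1, 2, 3, 0}
       //   result                  = {SRC1[1], SRC2[2], SRC1[3], SRC2[0]}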
2651 
2652   BasicType shuffle_bt = elem_bt;
2653   if (shuffle_bt == T_FLOAT) {
2654     shuffle_bt = T_INT;
2655   } else if (shuffle_bt == T_DOUBLE) {
2656     shuffle_bt = T_LONG;
2657   }
2658   const TypeVect* st = TypeVect::make(shuffle_bt, num_elem);
2659 
2660   // Cast the index vector to the integral type of the same bit width.
2661   if (elem_bt != shuffle_bt) {
2662     int cast_vopc = VectorCastNode::opcode(0, elem_bt, true);
2663     index_vec = phase.transform(VectorCastNode::make(cast_vopc, index_vec, shuffle_bt, num_elem));
2664   }
2665 
2666   // Wrap the indexes into the two-vector index range [0, 2 * VLEN)
2667   Node* two_vect_lane_cnt_m1 = phase.makecon(TypeInteger::make(2 * num_elem - 1, 2 * num_elem - 1, Type::WidenMin, shuffle_bt == T_LONG ? T_LONG : T_INT));
2668   Node* bcast_two_vect_lane_cnt_m1_vec = phase.transform(VectorNode::scalar2vector(two_vect_lane_cnt_m1, num_elem,
2669                                                                                    shuffle_bt, false));
2670   index_vec = phase.transform(VectorNode::make(Op_AndV, index_vec, bcast_two_vect_lane_cnt_m1_vec, st));
2671 
2672   // Compute the blend mask for merging the two independently permuted vectors,
2673   // using shuffle indexes in the two-vector index range [0, 2 * VLEN).
2674   BoolTest::mask pred = BoolTest::le;
2675   ConINode* pred_node = phase.makecon(TypeInt::make(pred))->as_ConI();
2676   const TypeVect* vmask_type = TypeVect::makemask(shuffle_bt, num_elem);
2677   Node* lane_cnt_m1 = phase.makecon(TypeInteger::make(num_elem - 1, num_elem - 1, Type::WidenMin, shuffle_bt == T_LONG ? T_LONG : T_INT));
2678   Node* bcast_lane_cnt_m1_vec = phase.transform(VectorNode::scalar2vector(lane_cnt_m1, num_elem, shuffle_bt, false));
2679   Node* mask = phase.transform(new VectorMaskCmpNode(pred, index_vec, bcast_lane_cnt_m1_vec, pred_node, vmask_type));
2680 
2681   // Rearrange expects the indexes to lie within the single-vector index range [0, VLEN).
2682   Node* wrapped_index_vec = phase.transform(VectorNode::make(Op_AndV, index_vec, bcast_lane_cnt_m1_vec, st));
2683 
2684   // If required by the target, transform the shuffle indexes into the
2685   // platform-specific permutation index format.
2686   if (Matcher::vector_rearrange_requires_load_shuffle(shuffle_bt, num_elem)) {
2687     wrapped_index_vec = phase.transform(new VectorLoadShuffleNode(wrapped_index_vec, st));
2688   }
2689 
2690   vmask_type = TypeVect::makemask(elem_bt, num_elem);
2691   mask = phase.transform(new VectorMaskCastNode(mask, vmask_type));
2692 
2693   Node* p1 = phase.transform(new VectorRearrangeNode(src1, wrapped_index_vec));
2694   Node* p2 = phase.transform(new VectorRearrangeNode(src2, wrapped_index_vec));
2695 
2696   return new VectorBlendNode(p2, p1, mask);
2697 }
2698 
2699 //  public static
2700 //  <V extends Vector<E>,
2701 //   E>
2702 //  V selectFromTwoVectorOp(Class<? extends V> vClass, Class<E> eClass, int length,
2703 //                          V v1, V v2, V v3,
2704 //                          SelectFromTwoVector<V> defaultImpl)
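     // (At the Java level this backs the two-vector Vector::selectFrom(v1, v2): each lane of
     //  the receiver is an index into the logical concatenation of v1 and v2, i.e. the
     //  two-vector index range [0, 2 * VLEN).)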
2705 bool LibraryCallKit::inline_vector_select_from_two_vectors() {
2706   const TypeInstPtr* vector_klass = gvn().type(argument(0))->isa_instptr();
2707   const TypeInstPtr* elem_klass = gvn().type(argument(1))->isa_instptr();
2708   const TypeInt* vlen = gvn().type(argument(2))->isa_int();
2709 
2710   if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || vector_klass->const_oop() == nullptr ||
2711       elem_klass->const_oop() == nullptr || !vlen->is_con()) {
2712     log_if_needed("  ** missing constant: vclass=%s etype=%s vlen=%s",
2713                     NodeClassNames[argument(0)->Opcode()],
2714                     NodeClassNames[argument(1)->Opcode()],
2715                     NodeClassNames[argument(2)->Opcode()]);
2716     return false; // not enough info for intrinsification
2717   }
2718 
2719   if (!is_klass_initialized(vector_klass)) {
2720     log_if_needed("  ** klass argument not initialized");
2721     return false;
2722   }
2723 
2724   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
2725   if (!elem_type->is_primitive_type()) {
2726     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
2727     return false; // should be primitive type
2728   }
2729 
2730   int num_elem = vlen->get_con();
2731   if (!is_power_of_2(num_elem)) {
2732     log_if_needed("  ** vlen is not power of two=%d", num_elem);
2733     return false;
2734   }
2735 
2736   BasicType elem_bt = elem_type->basic_type();
2737   BasicType index_elem_bt = elem_bt;
2738   if (elem_bt == T_FLOAT) {
2739     index_elem_bt = T_INT;
2740   } else if (elem_bt == T_DOUBLE) {
2741     index_elem_bt = T_LONG;
2742   }
2743 
2744   bool lowerSelectFromOp = false;
2745   if (!arch_supports_vector(Op_SelectFromTwoVector, num_elem, elem_bt, VecMaskNotUsed)) {
2746     int cast_vopc = VectorCastNode::opcode(-1, elem_bt, true);
2747     if ((elem_bt != index_elem_bt && !arch_supports_vector(cast_vopc, num_elem, index_elem_bt, VecMaskNotUsed)) ||
2748         !arch_supports_vector(Op_VectorMaskCmp, num_elem, index_elem_bt, VecMaskNotUsed)     ||
2749         !arch_supports_vector(Op_AndV, num_elem, index_elem_bt, VecMaskNotUsed)              ||
2750         !arch_supports_vector(Op_VectorMaskCast, num_elem, elem_bt, VecMaskNotUsed)          ||
2751         !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad)             ||
2752         !arch_supports_vector(Op_VectorRearrange, num_elem, elem_bt, VecMaskNotUsed)         ||
2753         !arch_supports_vector(Op_VectorLoadShuffle, num_elem, index_elem_bt, VecMaskNotUsed) ||
2754         !arch_supports_vector(Op_Replicate, num_elem, index_elem_bt, VecMaskNotUsed)) {
2755       log_if_needed("  ** not supported: opc=%d vlen=%d etype=%s ismask=useload",
2756                     Op_SelectFromTwoVector, num_elem, type2name(elem_bt));
2757       return false; // not supported
2758     }
2759     lowerSelectFromOp = true;
2760   }
2761 
2762   int cast_vopc = VectorCastNode::opcode(-1, elem_bt, true);
2763   if (!lowerSelectFromOp) {
2764     if (!arch_supports_vector(Op_AndV, num_elem, index_elem_bt, VecMaskNotUsed)      ||
2765         !arch_supports_vector(Op_Replicate, num_elem, index_elem_bt, VecMaskNotUsed) ||
2766         (is_floating_point_type(elem_bt) &&
2767          !arch_supports_vector(cast_vopc, num_elem, index_elem_bt, VecMaskNotUsed))) {
2768       log_if_needed("  ** index wrapping not supported: vlen=%d etype=%s",
2769                      num_elem, type2name(elem_bt));
2770       return false; // not supported
2771     }
2772   }
2773 
2774   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
2775   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
2776 
2777   Node* opd1 = unbox_vector(argument(3), vbox_type, elem_bt, num_elem);
2778   if (opd1 == nullptr) {
2779     log_if_needed("  ** unbox failed v1=%s",
2780                   NodeClassNames[argument(3)->Opcode()]);
2781     return false;
2782   }
2783   Node* opd2 = unbox_vector(argument(4), vbox_type, elem_bt, num_elem);
2784   if (opd2 == nullptr) {
2785     log_if_needed("  ** unbox failed v2=%s",
2786                   NodeClassNames[argument(4)->Opcode()]);
2787     return false;
2788   }
2789   Node* opd3 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem);
2790   if (opd3 == nullptr) {
2791     log_if_needed("  ** unbox failed v3=%s",
2792                   NodeClassNames[argument(5)->Opcode()]);
2793     return false;
2794   }
2795 
2796   const TypeVect* vt = TypeVect::make(elem_bt, num_elem);
2797 
2798   Node* operation = nullptr;
2799   if (lowerSelectFromOp) {
2800     operation = gvn().transform(LowerSelectFromTwoVectorOperation(gvn(), opd1, opd2, opd3, vt));
2801   } else {
2802     if (index_elem_bt != elem_bt) {
2803       opd1 = gvn().transform(VectorCastNode::make(cast_vopc, opd1, index_elem_bt, num_elem));
2804     }
2805     int indexRangeMask = 2 * num_elem - 1;
2806     Node* wrap_mask = gvn().makecon(TypeInteger::make(indexRangeMask, indexRangeMask, Type::WidenMin, index_elem_bt != T_LONG ? T_INT : index_elem_bt));
2807     Node* wrap_mask_vec = gvn().transform(VectorNode::scalar2vector(wrap_mask, num_elem, index_elem_bt, false));
2808     opd1 = gvn().transform(VectorNode::make(Op_AndV, opd1, wrap_mask_vec, opd1->bottom_type()->is_vect()));
2809     operation = gvn().transform(VectorNode::make(Op_SelectFromTwoVector, opd1, opd2, opd3, vt));
2810   }
2811 
2812   // Wrap it up in VectorBox to keep object type information.
2813   Node* vbox = box_vector(operation, vbox_type, elem_bt, num_elem);
2814   set_result(vbox);
2815   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
2816   return true;
2817 }
2818 
2819 // public static
2820 // <V extends Vector<E>,
2821 //  M extends VectorMask<E>,
2822 //  E>
2823 //  V compressExpandOp(int opr,
2824 //                    Class<? extends V> vClass, Class<? extends M> mClass, Class<E> eClass,
2825 //                    int length, V v, M m,
2826 //                    CompressExpandOperation<V, M> defaultImpl)
2827 bool LibraryCallKit::inline_vector_compress_expand() {
2828   const TypeInt*     opr          = gvn().type(argument(0))->isa_int();
2829   const TypeInstPtr* vector_klass = gvn().type(argument(1))->isa_instptr();
2830   const TypeInstPtr* mask_klass   = gvn().type(argument(2))->isa_instptr();
2831   const TypeInstPtr* elem_klass   = gvn().type(argument(3))->isa_instptr();
2832   const TypeInt*     vlen         = gvn().type(argument(4))->isa_int();
2833 
2834   if (opr          == nullptr || !opr->is_con() ||
2835       vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
2836       mask_klass   == nullptr || mask_klass->const_oop()   == nullptr ||
2837       elem_klass   == nullptr || elem_klass->const_oop()   == nullptr ||
2838       vlen         == nullptr || !vlen->is_con()) {
2839     log_if_needed("  ** missing constant: opr=%s vclass=%s mclass=%s etype=%s vlen=%s",
2840                     NodeClassNames[argument(0)->Opcode()],
2841                     NodeClassNames[argument(1)->Opcode()],
2842                     NodeClassNames[argument(2)->Opcode()],
2843                     NodeClassNames[argument(3)->Opcode()],
2844                     NodeClassNames[argument(4)->Opcode()]);
2845     return false; // not enough info for intrinsification
2846   }
2847 
2848   if (!is_klass_initialized(vector_klass) || !is_klass_initialized(mask_klass)) {
2849     log_if_needed("  ** klass argument not initialized");
2850     return false;
2851   }
2852 
2853   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
2854   if (!elem_type->is_primitive_type()) {
2855     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
2856     return false; // should be primitive type
2857   }
2858 
2859   int num_elem = vlen->get_con();
2860   BasicType elem_bt = elem_type->basic_type();
2861   int opc = VectorSupport::vop2ideal(opr->get_con(), elem_bt);
2862 
2863   if (!arch_supports_vector(opc, num_elem, elem_bt, VecMaskUseLoad)) {
2864     log_if_needed("  ** not supported: opc=%d vlen=%d etype=%s ismask=useload",
2865                     opc, num_elem, type2name(elem_bt));
2866     return false; // not supported
2867   }
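       // Illustrative semantics: VECTOR_OP_COMPRESS on v = {1, 2, 3, 4} with mask
       // {T, F, T, F} packs the selected lanes into the low part, giving {1, 3, 0, 0};
       // VECTOR_OP_EXPAND performs the inverse scatter, and VECTOR_OP_MASK_COMPRESS
       // (lowered to Op_CompressM) compresses the mask itself.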
2868 
2869   Node* opd1 = nullptr;
2870   const TypeInstPtr* vbox_type = nullptr;
2871   if (opc != Op_CompressM) {
2872     ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
2873     vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
2874     opd1 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem);
2875     if (opd1 == nullptr) {
2876       log_if_needed("  ** unbox failed vector=%s",
2877                       NodeClassNames[argument(5)->Opcode()]);
2878       return false;
2879     }
2880   }
2881 
2882   ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
2883   assert(is_vector_mask(mbox_klass), "argument(6) should be a mask class");
2884   const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass);
2885 
2886   Node* mask = unbox_vector(argument(6), mbox_type, elem_bt, num_elem);
2887   if (mask == nullptr) {
2888     log_if_needed("  ** unbox failed mask=%s",
2889                     NodeClassNames[argument(6)->Opcode()]);
2890     return false;
2891   }
2892 
2893   const TypeVect* vt = TypeVect::make(elem_bt, num_elem, opc == Op_CompressM);
2894   Node* operation = gvn().transform(VectorNode::make(opc, opd1, mask, vt));
2895 
2896   // Wrap it up in VectorBox to keep object type information.
2897   const TypeInstPtr* box_type = opc == Op_CompressM ? mbox_type : vbox_type;
2898   Node* vbox = box_vector(operation, box_type, elem_bt, num_elem);
2899   set_result(vbox);
2900   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
2901   return true;
2902 }
2903 
2904 // public static
2905 // <V extends Vector<E>,
2906 //  E,
2907 //  S extends VectorSpecies<E>>
2908 //  V indexVector(Class<? extends V> vClass, Class<E> eClass,
2909 //                int length,
2910 //                V v, int step, S s,
2911 //                IndexOperation<V, S> defaultImpl)
2912 bool LibraryCallKit::inline_index_vector() {
2913   const TypeInstPtr* vector_klass = gvn().type(argument(0))->isa_instptr();
2914   const TypeInstPtr* elem_klass   = gvn().type(argument(1))->isa_instptr();
2915   const TypeInt*     vlen         = gvn().type(argument(2))->isa_int();
2916 
2917   if (vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
2918       elem_klass   == nullptr || elem_klass->const_oop()   == nullptr ||
2919       vlen         == nullptr || !vlen->is_con()) {
2920     log_if_needed("  ** missing constant: vclass=%s etype=%s vlen=%s",
2921                     NodeClassNames[argument(0)->Opcode()],
2922                     NodeClassNames[argument(1)->Opcode()],
2923                     NodeClassNames[argument(2)->Opcode()]);
2924     return false; // not enough info for intrinsification
2925   }
2926 
2927   if (!is_klass_initialized(vector_klass)) {
2928     log_if_needed("  ** klass argument not initialized");
2929     return false;
2930   }
2931 
2932   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
2933   if (!elem_type->is_primitive_type()) {
2934     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
2935     return false; // should be primitive type
2936   }
2937 
2938   int num_elem = vlen->get_con();
2939   BasicType elem_bt = elem_type->basic_type();
2940 
2941   // Check whether the iota index generation op is supported by the current hardware
2942   if (!arch_supports_vector(Op_VectorLoadConst, num_elem, elem_bt, VecMaskNotUsed)) {
2943     log_if_needed("  ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt));
2944     return false; // not supported
2945   }
2946 
2947   int mul_op = VectorSupport::vop2ideal(VectorSupport::VECTOR_OP_MUL, elem_bt);
2948   int vmul_op = VectorNode::opcode(mul_op, elem_bt);
2949   bool needs_mul = true;
2950   Node* scale = argument(4);
2951   const TypeInt* scale_type = gvn().type(scale)->isa_int();
2952   // Multiply is not needed if the scale is a constant "1".
2953   if (scale_type && scale_type->is_con() && scale_type->get_con() == 1) {
2954     needs_mul = false;
2955   } else {
2956     // Check whether the vector multiply op is supported by the current hardware
2957     if (!arch_supports_vector(vmul_op, num_elem, elem_bt, VecMaskNotUsed)) {
2958       log_if_needed("  ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt));
2959       return false; // not supported
2960     }
2961 
2962     // Check whether the scalar cast op is supported by the current hardware
2963     if (is_floating_point_type(elem_bt) || elem_bt == T_LONG) {
2964       int cast_op = elem_bt == T_LONG ? Op_ConvI2L :
2965                     elem_bt == T_FLOAT? Op_ConvI2F : Op_ConvI2D;
2966       if (!Matcher::match_rule_supported(cast_op)) {
2967         log_if_needed("  ** Rejected op (%s) because architecture does not support it",
2968                         NodeClassNames[cast_op]);
2969         return false; // not supported
2970       }
2971     }
2972   }
2973 
2974   ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass();
2975   const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
2976   Node* opd = unbox_vector(argument(3), vbox_type, elem_bt, num_elem);
2977   if (opd == nullptr) {
2978     log_if_needed("  ** unbox failed vector=%s",
2979                     NodeClassNames[argument(3)->Opcode()]);
2980     return false;
2981   }
2982 
2983   int add_op = VectorSupport::vop2ideal(VectorSupport::VECTOR_OP_ADD, elem_bt);
2984   int vadd_op = VectorNode::opcode(add_op, elem_bt);
2985   bool needs_add = true;
2986   // The addition is not needed if all the element values of "opd" are zero
2987   if (VectorNode::is_all_zeros_vector(opd)) {
2988     needs_add = false;
2989   } else {
2990     // Check whether the vector addition op is supported by the current hardware
2991     if (!arch_supports_vector(vadd_op, num_elem, elem_bt, VecMaskNotUsed)) {
2992       log_if_needed("  ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt));
2993       return false; // not supported
2994     }
2995   }
2996 
2997   // Compute the iota index vector.
2998   const TypeVect* vt = TypeVect::make(elem_bt, num_elem);
2999   Node* index = gvn().transform(new VectorLoadConstNode(gvn().makecon(TypeInt::ZERO), vt));
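       // Illustrative example: for a 4-lane int species with v = {10, 10, 10, 10} and
       // step = 3, the result below is iota * step + v = {10, 13, 16, 19}.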
3000 
3001   // Broadcast "scale" to a vector and multiply it with the iota index vector.
3002   if (needs_mul) {
3003     switch (elem_bt) {
3004       case T_BOOLEAN: // fall-through
3005       case T_BYTE:    // fall-through
3006       case T_SHORT:   // fall-through
3007       case T_CHAR:    // fall-through
3008       case T_INT: {
3009         // no conversion needed
3010         break;
3011       }
3012       case T_LONG: {
3013         scale = gvn().transform(new ConvI2LNode(scale));
3014         break;
3015       }
3016       case T_FLOAT: {
3017         scale = gvn().transform(new ConvI2FNode(scale));
3018         break;
3019       }
3020       case T_DOUBLE: {
3021         scale = gvn().transform(new ConvI2DNode(scale));
3022         break;
3023       }
3024       default: fatal("%s", type2name(elem_bt));
3025     }
3026     scale = gvn().transform(VectorNode::scalar2vector(scale, num_elem, elem_bt));
3027     index = gvn().transform(VectorNode::make(vmul_op, index, scale, vt));
3028   }
3029 
3030   // Add "opd" if addition is needed.
3031   if (needs_add) {
3032     index = gvn().transform(VectorNode::make(vadd_op, opd, index, vt));
3033   }
3034   Node* vbox = box_vector(index, vbox_type, elem_bt, num_elem);
3035   set_result(vbox);
3036   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
3037   return true;
3038 }
3039 
3040 // public static
3041 // <E,
3042 //  M extends VectorMask<E>>
3043 // M indexPartiallyInUpperRange(Class<? extends M> mClass, Class<E> eClass, int length,
3044 //                              long offset, long limit,
3045 //                              IndexPartiallyInUpperRangeOperation<E, M> defaultImpl)
3046 bool LibraryCallKit::inline_index_partially_in_upper_range() {
3047   const TypeInstPtr* mask_klass   = gvn().type(argument(0))->isa_instptr();
3048   const TypeInstPtr* elem_klass   = gvn().type(argument(1))->isa_instptr();
3049   const TypeInt*     vlen         = gvn().type(argument(2))->isa_int();
3050 
3051   if (mask_klass == nullptr || mask_klass->const_oop() == nullptr ||
3052       elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
3053       vlen       == nullptr || !vlen->is_con()) {
3054     log_if_needed("  ** missing constant: mclass=%s etype=%s vlen=%s",
3055                     NodeClassNames[argument(0)->Opcode()],
3056                     NodeClassNames[argument(1)->Opcode()],
3057                     NodeClassNames[argument(2)->Opcode()]);
3058     return false; // not enough info for intrinsification
3059   }
3060 
3061   if (!is_klass_initialized(mask_klass)) {
3062     log_if_needed("  ** klass argument not initialized");
3063     return false;
3064   }
3065 
3066   ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
3067   if (!elem_type->is_primitive_type()) {
3068     log_if_needed("  ** not a primitive bt=%d", elem_type->basic_type());
3069     return false; // should be primitive type
3070   }
3071 
3072   int num_elem = vlen->get_con();
3073   BasicType elem_bt = elem_type->basic_type();
3074 
3075   // Check whether the necessary ops are supported by current hardware.
3076   bool supports_mask_gen = arch_supports_vector(Op_VectorMaskGen, num_elem, elem_bt, VecMaskUseStore);
3077   if (!supports_mask_gen) {
3078     if (!arch_supports_vector(Op_VectorLoadConst, num_elem, elem_bt, VecMaskNotUsed) ||
3079         !arch_supports_vector(Op_Replicate, num_elem, elem_bt, VecMaskNotUsed) ||
3080         !arch_supports_vector(Op_VectorMaskCmp, num_elem, elem_bt, VecMaskUseStore)) {
3081       log_if_needed("  ** not supported: vlen=%d etype=%s", num_elem, type2name(elem_bt));
3082       return false; // not supported
3083     }
3084 
3085     // Check whether the scalar cast operation is supported by current hardware.
3086     if (elem_bt != T_LONG) {
3087       int cast_op = is_integral_type(elem_bt) ? Op_ConvL2I
3088                                               : (elem_bt == T_FLOAT ? Op_ConvL2F : Op_ConvL2D);
3089       if (!Matcher::match_rule_supported(cast_op)) {
3090         log_if_needed("  ** Rejected op (%s) because architecture does not support it",
3091                         NodeClassNames[cast_op]);
3092         return false; // not supported
3093       }
3094     }
3095   }
3096 
3097   Node* offset = argument(3);
3098   Node* limit = argument(5);
3099   if (offset == nullptr || limit == nullptr) {
3100     log_if_needed("  ** offset or limit argument is null");
3101     return false; // not supported
3102   }
3103 
3104   ciKlass* box_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass();
3105   assert(is_vector_mask(box_klass), "argument(0) should be a mask class");
3106   const TypeInstPtr* box_type = TypeInstPtr::make_exact(TypePtr::NotNull, box_klass);
3107 
3108   // We assume "offset > 0 && limit >= offset && limit - offset < num_elem".
3109   // So indexLimit can be computed directly as "indexLimit = limit - offset".
3110   Node* indexLimit = gvn().transform(new SubLNode(limit, offset));
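       // Illustrative example: for a 4-lane species with offset = 6 and limit = 9,
       // indexLimit = 3 and the generated mask is {T, T, T, F} (iota {0, 1, 2, 3} < 3).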
3111   Node* mask = nullptr;
3112   if (supports_mask_gen) {
3113     mask = gvn().transform(VectorMaskGenNode::make(indexLimit, elem_bt, num_elem));
3114   } else {
3115     // Generate the vector mask based on "mask = iota < indexLimit".
3116     // Broadcast "indexLimit" to a vector.
3117     switch (elem_bt) {
3118       case T_BOOLEAN: // fall-through
3119       case T_BYTE:    // fall-through
3120       case T_SHORT:   // fall-through
3121       case T_CHAR:    // fall-through
3122       case T_INT: {
3123         indexLimit = gvn().transform(new ConvL2INode(indexLimit));
3124         break;
3125       }
3126       case T_DOUBLE: {
3127         indexLimit = gvn().transform(new ConvL2DNode(indexLimit));
3128         break;
3129       }
3130       case T_FLOAT: {
3131         indexLimit = gvn().transform(new ConvL2FNode(indexLimit));
3132         break;
3133       }
3134       case T_LONG: {
3135         // no conversion needed
3136         break;
3137       }
3138       default: fatal("%s", type2name(elem_bt));
3139     }
3140     indexLimit = gvn().transform(VectorNode::scalar2vector(indexLimit, num_elem, elem_bt));
3141 
3142     // Load the "iota" vector.
3143     const TypeVect* vt = TypeVect::make(elem_bt, num_elem);
3144     Node* iota = gvn().transform(new VectorLoadConstNode(gvn().makecon(TypeInt::ZERO), vt));
3145 
3146     // Compute the vector mask with "mask = iota < indexLimit".
3147     ConINode* pred_node = (ConINode*)gvn().makecon(TypeInt::make(BoolTest::lt));
3148     const TypeVect* vmask_type = TypeVect::makemask(elem_bt, num_elem);
3149     mask = gvn().transform(new VectorMaskCmpNode(BoolTest::lt, iota, indexLimit, pred_node, vmask_type));
3150   }
3151   Node* vbox = box_vector(mask, box_type, elem_bt, num_elem);
3152   set_result(vbox);
3153   C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt))));
3154   return true;
3155 }
3156 
3157 #undef non_product_log_if_needed
3158 #undef log_if_needed