src/share/vm/opto/library_call.cpp

  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileBroker.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/callGenerator.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/idealKit.hpp"
  36 #include "opto/mathexactnode.hpp"
  37 #include "opto/mulnode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/runtime.hpp"

  40 #include "opto/subnode.hpp"
  41 #include "prims/nativeLookup.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "trace/traceMacros.hpp"


  44 
  45 class LibraryIntrinsic : public InlineCallGenerator {
  46   // Extend the set of intrinsics known to the runtime:
  47  public:
  48  private:
  49   bool             _is_virtual;
  50   bool             _does_virtual_dispatch;
  51   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  52   int8_t           _last_predicate; // Last generated predicate
  53   vmIntrinsics::ID _intrinsic_id;
  54 
  55  public:
  56   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  57     : InlineCallGenerator(m),
  58       _is_virtual(is_virtual),
  59       _does_virtual_dispatch(does_virtual_dispatch),
  60       _predicates_count((int8_t)predicates_count),
  61       _last_predicate((int8_t)-1),
  62       _intrinsic_id(id)
  63   {


 208   bool inline_trig(vmIntrinsics::ID id);
 209   bool inline_math(vmIntrinsics::ID id);
 210   template <typename OverflowOp>
 211   bool inline_math_overflow(Node* arg1, Node* arg2);
 212   void inline_math_mathExact(Node* math, Node* test);
 213   bool inline_math_addExactI(bool is_increment);
 214   bool inline_math_addExactL(bool is_increment);
 215   bool inline_math_multiplyExactI();
 216   bool inline_math_multiplyExactL();
 217   bool inline_math_negateExactI();
 218   bool inline_math_negateExactL();
 219   bool inline_math_subtractExactI(bool is_decrement);
 220   bool inline_math_subtractExactL(bool is_decrement);
 221   bool inline_exp();
 222   bool inline_pow();
 223   Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
 224   bool inline_min_max(vmIntrinsics::ID id);
 225   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
 226   // This returns Type::AnyPtr, RawPtr, or OopPtr.
 227   int classify_unsafe_addr(Node* &base, Node* &offset);
 228   Node* make_unsafe_address(Node* base, Node* offset);
 229   // Helper for inline_unsafe_access.
 230   // Generates the guards that check whether the result of
 231   // Unsafe.getObject should be recorded in an SATB log buffer.
 232   void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
 233   bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned);
 234   bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
 235   static bool klass_needs_init_guard(Node* kls);
 236   bool inline_unsafe_allocate();
 237   bool inline_unsafe_copyMemory();
 238   bool inline_native_currentThread();
 239 #ifdef TRACE_HAVE_INTRINSICS
 240   bool inline_native_classID();
 241   bool inline_native_threadID();
 242 #endif
 243   bool inline_native_time_funcs(address method, const char* funcName);
 244   bool inline_native_isInterrupted();
 245   bool inline_native_Class_query(vmIntrinsics::ID id);
 246   bool inline_native_subtype_check();
 247 
 248   bool inline_native_newArray();


 313   Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
 314   bool inline_sha_implCompress(vmIntrinsics::ID id);
 315   bool inline_digestBase_implCompressMB(int predicate);
 316   bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
 317                                  bool long_state, address stubAddr, const char *stubName,
 318                                  Node* src_start, Node* ofs, Node* limit);
 319   Node* get_state_from_sha_object(Node *sha_object);
 320   Node* get_state_from_sha5_object(Node *sha_object);
 321   Node* inline_digestBase_implCompressMB_predicate(int predicate);
 322   bool inline_encodeISOArray();
 323   bool inline_updateCRC32();
 324   bool inline_updateBytesCRC32();
 325   bool inline_updateByteBufferCRC32();
 326   bool inline_multiplyToLen();
 327   bool inline_squareToLen();
 328   bool inline_mulAdd();
 329   bool inline_montgomeryMultiply();
 330   bool inline_montgomerySquare();
 331 
 332   bool inline_profileBoolean();




 333 };
 334 
 335 
 336 //---------------------------make_vm_intrinsic----------------------------
 337 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
 338   vmIntrinsics::ID id = m->intrinsic_id();
 339   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
 340 
 341   ccstr disable_intr = NULL;
 342 
 343   if ((DisableIntrinsic[0] != '\0'
 344        && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) ||
 345       (method_has_option_value("DisableIntrinsic", disable_intr)
 346        && strstr(disable_intr, vmIntrinsics::name_at(id)) != NULL)) {
 347     // disabled by a user request on the command line:
 348     // example: -XX:DisableIntrinsic=_hashCode,_getClass
 349     return NULL;
 350   }
 351 
 352   if (!m->is_loaded()) {


1145 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
1146   ciKlass*    thread_klass = env()->Thread_klass();
1147   const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
1148   Node* thread = _gvn.transform(new (C) ThreadLocalNode());
1149   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
1150   Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
1151   tls_output = thread;
1152   return threadObj;
1153 }
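// The node returned above is the Java-level Thread oop; tls_output hands back
// the raw JavaThread* so that callers needing further thread-local state (for
// example the osthread/interrupted probe in inline_native_isInterrupted below)
// can address off of it directly.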
1154 
1155 
1156 //------------------------------make_string_method_node------------------------
1157 // Helper method for String intrinsic functions. This version is called
1158 // with str1 and str2 pointing to String object nodes.
1159 //
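// In the String layout assumed throughout this file (see the StringDecl sketch
// further down), a String carries a char[] 'value' plus 'offset' and 'count'
// fields, so the first character lives at value[offset] and
// array_element_address(value, offset, T_CHAR) yields its address.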
1160 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1, Node* str2) {
1161   Node* no_ctrl = NULL;
1162 
1163   // Get start addr of string
1164   Node* str1_value   = load_String_value(no_ctrl, str1);


1165   Node* str1_offset  = load_String_offset(no_ctrl, str1);
1166   Node* str1_start   = array_element_address(str1_value, str1_offset, T_CHAR);
1167 
1168   // Get length of string 1
1169   Node* str1_len  = load_String_length(no_ctrl, str1);
1170 
1171   Node* str2_value   = load_String_value(no_ctrl, str2);


1172   Node* str2_offset  = load_String_offset(no_ctrl, str2);
1173   Node* str2_start   = array_element_address(str2_value, str2_offset, T_CHAR);
1174 
1175   Node* str2_len = NULL;
1176   Node* result = NULL;
1177 
1178   switch (opcode) {
1179   case Op_StrIndexOf:
1180     // Get length of string 2
1181     str2_len = load_String_length(no_ctrl, str2);
1182 
1183     result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
1184                                  str1_start, str1_len, str2_start, str2_len);
1185     break;
1186   case Op_StrComp:
1187     // Get length of string 2
1188     str2_len = load_String_length(no_ctrl, str2);
1189 
1190     result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
1191                                  str1_start, str1_len, str2_start, str2_len);


1245   }
1246   set_result(make_string_method_node(Op_StrComp, receiver, arg));
1247   return true;
1248 }
1249 
1250 //------------------------------inline_string_equals------------------------
1251 bool LibraryCallKit::inline_string_equals() {
1252   Node* receiver = null_check_receiver();
1253   // NOTE: Do not null check the argument for String.equals() because the spec
1254   // allows NULL to be passed as the argument.
1255   Node* argument = this->argument(1);
1256   if (stopped()) {
1257     return true;
1258   }
1259 
1260   // paths (plus control) merge
1261   RegionNode* region = new (C) RegionNode(5);
1262   Node* phi = new (C) PhiNode(region, TypeInt::BOOL);
1263 
1264   // does source == target string?


1265   Node* cmp = _gvn.transform(new (C) CmpPNode(receiver, argument));
1266   Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::eq));
1267 
1268   Node* if_eq = generate_slow_guard(bol, NULL);
1269   if (if_eq != NULL) {
1270     // receiver == argument
1271     phi->init_req(2, intcon(1));
1272     region->init_req(2, if_eq);
1273   }
1274 
1275   // get String klass for instanceOf
1276   ciInstanceKlass* klass = env()->String_klass();
1277 
1278   if (!stopped()) {
1279     Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
1280     Node* cmp  = _gvn.transform(new (C) CmpINode(inst, intcon(1)));
1281     Node* bol  = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
1282 
1283     Node* inst_false = generate_guard(bol, NULL, PROB_MIN);
1284     //instanceOf == true, fallthrough
1285 
1286     if (inst_false != NULL) {
1287       phi->init_req(3, intcon(0));
1288       region->init_req(3, inst_false);
1289     }
1290   }
1291 
1292   if (!stopped()) {
1293     const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
1294 
1295     // Properly cast the argument to String
1296     argument = _gvn.transform(new (C) CheckCastPPNode(control(), argument, string_type));
1297     // This path is taken only when argument's type is String:NotNull.
1298     argument = cast_not_null(argument, false);
1299 
1300     Node* no_ctrl = NULL;
1301 
1302     // Get start addr of receiver
1303     Node* receiver_val    = load_String_value(no_ctrl, receiver);


1304     Node* receiver_offset = load_String_offset(no_ctrl, receiver);
1305     Node* receiver_start = array_element_address(receiver_val, receiver_offset, T_CHAR);
1306 
1307     // Get length of receiver
1308     Node* receiver_cnt  = load_String_length(no_ctrl, receiver);
1309 
1310     // Get start addr of argument
1311     Node* argument_val    = load_String_value(no_ctrl, argument);


1312     Node* argument_offset = load_String_offset(no_ctrl, argument);
1313     Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR);
1314 
1315     // Get length of argument
1316     Node* argument_cnt  = load_String_length(no_ctrl, argument);
1317 
1318     // Check for receiver count != argument count
1319     Node* cmp = _gvn.transform(new(C) CmpINode(receiver_cnt, argument_cnt));
1320     Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::ne));
1321     Node* if_ne = generate_slow_guard(bol, NULL);
1322     if (if_ne != NULL) {
1323       phi->init_req(4, intcon(0));
1324       region->init_req(4, if_ne);
1325     }
1326 
1327     // The check for count == 0 is done by the assembler code for StrEquals.
1328 
1329     if (!stopped()) {
1330       Node* equals = make_string_method_node(Op_StrEquals, receiver_start, receiver_cnt, argument_start, argument_cnt);
1331       phi->init_req(1, equals);
1332       region->init_req(1, control());
1333     }
1334   }
1335 
1336   // post merge
1337   set_control(_gvn.transform(region));
1338   record_for_igvn(region);
1339 
1340   set_result(_gvn.transform(phi));
1341   return true;
1342 }
1343 
1344 //------------------------------inline_array_equals----------------------------
1345 bool LibraryCallKit::inline_array_equals() {
1346   Node* arg1 = argument(0);
1347   Node* arg2 = argument(1);




1348   set_result(_gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
1349   return true;
1350 }
1351 
1352 // Java version of String.indexOf(constant string)
1353 // class StringDecl {
1354 //   StringDecl(char[] ca) {
1355 //     offset = 0;
1356 //     count = ca.length;
1357 //     value = ca;
1358 //   }
1359 //   int offset;
1360 //   int count;
1361 //   char[] value;
1362 // }
1363 //
1364 // static int string_indexOf_J(StringDecl string_object, char[] target_object,
1365 //                             int targetOffset, int cache_i, int md2) {
1366 //   int cache = cache_i;
1367 //   int sourceOffset = string_object.offset;


1403 //     }
1404 //     if ((cache & (1 << src)) == 0) {
1405 //       i += targetCountLess1;
1406 //     } // using "i += targetCount;" and an "else i++;" causes a jump to a jump.
1407 //     i++;
1408 //   }
1409 //   return -1;
1410 // }
1411 
1412 //------------------------------string_indexOf------------------------
1413 Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_array, jint targetOffset_i,
1414                                      jint cache_i, jint md2_i) {
1415 
1416   Node* no_ctrl  = NULL;
1417   float likely   = PROB_LIKELY(0.9);
1418   float unlikely = PROB_UNLIKELY(0.9);
1419 
1420   const int nargs = 0; // no arguments to push back for uncommon trap in predicate
1421 
1422   Node* source        = load_String_value(no_ctrl, string_object);


1423   Node* sourceOffset  = load_String_offset(no_ctrl, string_object);
1424   Node* sourceCount   = load_String_length(no_ctrl, string_object);
1425 
1426   Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)));
1427   jint target_length = target_array->length();
1428   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
1429   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
1430 
1431   // String.value field is known to be @Stable.
1432   if (UseImplicitStableValues) {
1433     target = cast_array_to_stable(target, target_type);
1434   }
1435 


1436   IdealKit kit(this, false, true);
1437 #define __ kit.
1438   Node* zero             = __ ConI(0);
1439   Node* one              = __ ConI(1);
1440   Node* cache            = __ ConI(cache_i);
1441   Node* md2              = __ ConI(md2_i);
1442   Node* lastChar         = __ ConI(target_array->char_at(target_length - 1));
1443   Node* targetCount      = __ ConI(target_length);
1444   Node* targetCountLess1 = __ ConI(target_length - 1);
1445   Node* targetOffset     = __ ConI(targetOffset_i);
1446   Node* sourceEnd        = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);
1447 
1448   IdealVariable rtn(kit), i(kit), j(kit); __ declarations_done();
1449   Node* outer_loop = __ make_label(2 /* goto */);
1450   Node* return_    = __ make_label(1);
1451 
1452   __ set(rtn,__ ConI(-1));
1453   __ loop(this, nargs, i, sourceOffset, BoolTest::lt, sourceEnd); {
1454        Node* i2  = __ AddI(__ value(i), targetCountLess1);
1455        // pin to prohibit loading of "next iteration" value which may SEGV (rare)


1503       UseSSE42Intrinsics) {
1504     // Generate SSE4.2 version of indexOf
1505     // We currently only have match rules that use SSE4.2
1506 
1507     receiver = null_check(receiver);
1508     arg      = null_check(arg);
1509     if (stopped()) {
1510       return true;
1511     }
1512 
1513     ciInstanceKlass* str_klass = env()->String_klass();
1514     const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(str_klass);
1515 
1516     // Make the merge point
1517     RegionNode* result_rgn = new (C) RegionNode(4);
1518     Node*       result_phi = new (C) PhiNode(result_rgn, TypeInt::INT);
1519     Node* no_ctrl  = NULL;
1520 
1521     // Get start addr of source string
1522     Node* source = load_String_value(no_ctrl, receiver);


1523     Node* source_offset = load_String_offset(no_ctrl, receiver);
1524     Node* source_start = array_element_address(source, source_offset, T_CHAR);
1525 
1526     // Get length of source string
1527     Node* source_cnt  = load_String_length(no_ctrl, receiver);
1528 
1529     // Get start addr of substring
1530     Node* substr = load_String_value(no_ctrl, arg);


1531     Node* substr_offset = load_String_offset(no_ctrl, arg);
1532     Node* substr_start = array_element_address(substr, substr_offset, T_CHAR);
1533 
1534     // Get length of substring
1535     Node* substr_cnt  = load_String_length(no_ctrl, arg);
1536 
1537     // Check for substr count > string count
1538     Node* cmp = _gvn.transform(new(C) CmpINode(substr_cnt, source_cnt));
1539     Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::gt));
1540     Node* if_gt = generate_slow_guard(bol, NULL);
1541     if (if_gt != NULL) {
1542       result_phi->init_req(2, intcon(-1));
1543       result_rgn->init_req(2, if_gt);
1544     }
1545 
1546     if (!stopped()) {
1547       // Check for substr count == 0
1548       cmp = _gvn.transform(new(C) CmpINode(substr_cnt, intcon(0)));
1549       bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
1550       Node* if_zero = generate_slow_guard(bol, NULL);


1583     ciObject* v = str->field_value_by_offset(java_lang_String::value_offset_in_bytes()).as_object();
1584     ciTypeArray* pat = v->as_type_array(); // pattern (argument) character array
1585 
1586     int o;
1587     int c;
1588     if (java_lang_String::has_offset_field()) {
1589       o = str->field_value_by_offset(java_lang_String::offset_offset_in_bytes()).as_int();
1590       c = str->field_value_by_offset(java_lang_String::count_offset_in_bytes()).as_int();
1591     } else {
1592       o = 0;
1593       c = pat->length();
1594     }
1595 
1596     // Constant strings have no offset and count == length, which
1597     // simplifies the resulting code somewhat, so let's optimize for that.
1598     if (o != 0 || c != pat->length()) {
1599      return false;
1600     }
1601 
1602     receiver = null_check(receiver, T_OBJECT);

1603     // NOTE: No null check on the argument is needed since it's a constant String oop.
1604     if (stopped()) {
1605       return true;
1606     }
1607 
1608     // The null string as a pattern always returns 0 (match at beginning of string)
1609     if (c == 0) {
1610       set_result(intcon(0));
1611       return true;
1612     }
1613 
1614     // Generate default indexOf
1615     jchar lastChar = pat->char_at(o + (c - 1));
1616     int cache = 0;
1617     int i;
1618     for (i = 0; i < c - 1; i++) {
1619       assert(i < pat->length(), "out of range");
1620       cache |= (1 << (pat->char_at(o + i) & (sizeof(cache) * BitsPerByte - 1)));
1621     }
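    // At this point 'cache' is a 32-bit membership mask over (char & 31) for
    // every pattern character except the last; the generated loop (see
    // string_indexOf above) tests it to decide whether the current source
    // character can occur in the pattern at all, and skips ahead by
    // targetCountLess1 when it cannot.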
1622 


2348     // Base is never null => always a heap address.
2349     if (base_type->ptr() == TypePtr::NotNull) {
2350       return Type::OopPtr;
2351     }
2352     // Offset is small => always a heap address.
2353     const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2354     if (offset_type != NULL &&
2355         base_type->offset() == 0 &&     // (should always be?)
2356         offset_type->_lo >= 0 &&
2357         !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2358       return Type::OopPtr;
2359     }
2360     // Otherwise, it might either be oop+off or NULL+addr.
2361     return Type::AnyPtr;
2362   } else {
2363     // No information:
2364     return Type::AnyPtr;
2365   }
2366 }
2367 
2368 inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
2369   int kind = classify_unsafe_addr(base, offset);
2370   if (kind == Type::RawPtr) {
2371     return basic_plus_adr(top(), base, offset);
2372   } else {
2373     return basic_plus_adr(base, offset);
2374   }
2375 }
2376 
2377 //--------------------------inline_number_methods-----------------------------
2378 // inline int     Integer.numberOfLeadingZeros(int)
2379 // inline int        Long.numberOfLeadingZeros(long)
2380 //
2381 // inline int     Integer.numberOfTrailingZeros(int)
2382 // inline int        Long.numberOfTrailingZeros(long)
2383 //
2384 // inline int     Integer.bitCount(int)
2385 // inline int        Long.bitCount(long)
2386 //
2387 // inline char  Character.reverseBytes(char)
2388 // inline short     Short.reverseBytes(short)
2389 // inline int     Integer.reverseBytes(int)
2390 // inline long       Long.reverseBytes(long)
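//
// Each case below simply wraps argument(0) in the matching ideal node (e.g.
// ReverseBytesLNode for _reverseBytes_l), which the matcher can then reduce to
// a single instruction on platforms that provide a match rule for it.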
2391 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2392   Node* arg = argument(0);


2404   case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
2405   default:  fatal_unexpected_iid(id);  break;
2406   }
2407   set_result(_gvn.transform(n));
2408   return true;
2409 }
2410 
2411 //----------------------------inline_unsafe_access----------------------------
2412 
2413 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2414 
2415 // Helper that guards and inserts a pre-barrier.
2416 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2417                                         Node* pre_val, bool need_mem_bar) {
2418   // We could be accessing the referent field of a reference object. If so, when G1
2419   // is enabled, we need to log the value in the referent field in an SATB buffer.
2420   // This routine performs some compile time filters and generates suitable
2421   // runtime filters that guard the pre-barrier code.
2422   // Also add a memory barrier for a non-volatile load from the referent field
2423   // to prevent commoning of loads across a safepoint.
2424   if (!UseG1GC && !need_mem_bar)
2425     return;
2426 
2427   // Some compile time checks.
2428 
2429   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2430   const TypeX* otype = offset->find_intptr_t_type();
2431   if (otype != NULL && otype->is_con() &&
2432       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2433     // Constant offset but not the reference_offset so just return
2434     return;
2435   }
2436 
2437   // We only need to generate the runtime guards for instances.
2438   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2439   if (btype != NULL) {
2440     if (btype->isa_aryptr()) {
2441       // Array type so nothing to do
2442       return;
2443     }
2444 


2604 
2605   // Build address expression.  See the code in inline_unsafe_prefetch.
2606   Node* adr;
2607   Node* heap_base_oop = top();
2608   Node* offset = top();
2609   Node* val;
2610 
2611   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2612   Node* base = argument(1);  // type: oop
2613 
2614   if (!is_native_ptr) {
2615     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2616     offset = argument(2);  // type: long
2617     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2618     // to be plain byte offsets, which are also the same as those accepted
2619     // by oopDesc::field_base.
2620     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2621            "fieldOffset must be byte-scaled");
2622     // 32-bit machines ignore the high half!
2623     offset = ConvL2X(offset);
2624     adr = make_unsafe_address(base, offset);
2625     heap_base_oop = base;
2626     val = is_store ? argument(4) : NULL;
2627   } else {
2628     Node* ptr = argument(1);  // type: long
2629     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2630     adr = make_unsafe_address(NULL, ptr);
2631     val = is_store ? argument(3) : NULL;
2632   }
2633 
2634   if ((_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) && type == T_OBJECT) {
2635     return false; // off-heap oop accesses are not supported
2636   }
2637 
2638   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2639 
2640   // Try to categorize the address.
2641   Compile::AliasType* alias_type = C->alias_type(adr_type);
2642   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2643 
2644   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2645       alias_type->adr_type() == TypeAryPtr::RANGE) {
2646     return false; // not supported
2647   }
2648 
2649   bool mismatched = false;
2650   BasicType bt = alias_type->basic_type();


2661     }
2662     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2663       // Don't intrinsify mismatched object accesses
2664       return false;
2665     }
2666     mismatched = (bt != type);
2667   }
2668 
2669   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2670 
2671   // First guess at the value type.
2672   const Type *value_type = Type::get_const_basic_type(type);
2673 
2674   // We will need memory barriers unless we can determine a unique
2675   // alias category for this reference.  (Note:  If for some reason
2676   // the barriers get omitted and the unsafe reference begins to "pollute"
2677   // the alias analysis of the rest of the graph, either Compile::can_alias
2678   // or Compile::must_alias will throw a diagnostic assert.)
2679   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2680 
2681   // If we are reading the value of the referent field of a Reference
2682   // object (either by using Unsafe directly or through reflection)
2683   // then, if G1 is enabled, we need to record the referent in an
2684   // SATB log buffer using the pre-barrier mechanism.
2685   // Also we need to add a memory barrier to prevent commoning reads
2686   // from this field across a safepoint since GC can change its value.
2687   bool need_read_barrier = !is_native_ptr && !is_store &&
2688                            offset != top() && heap_base_oop != top();
2689 
2690   if (!is_store && type == T_OBJECT) {
2691     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2692     if (tjp != NULL) {
2693       value_type = tjp;
2694     }
2695   }
2696 
2697   receiver = null_check(receiver);
2698   if (stopped()) {
2699     return true;
2700   }
2701   // Heap pointers get a null-check from the interpreter,
2702   // as a courtesy.  However, this is not guaranteed by Unsafe,
2703   // and it is not possible to fully distinguish unintended nulls
2704   // from intended ones in this API.
2705 



2706   if (is_volatile) {
2707     // We need to emit leading and trailing CPU membars (see below) in
2708     // addition to memory membars when is_volatile. This is a little
2709     // too strong, but avoids the need to insert per-alias-type
2710     // volatile membars (for stores; compare Parse::do_put_xxx), which
2711     // we cannot do effectively here because we probably only have a
2712     // rough approximation of type.
2713     need_mem_bar = true;
2714     // For Stores, place a memory ordering barrier now.
2715     if (is_store) {
2716       insert_mem_bar(Op_MemBarRelease);
2717     } else {
2718       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2719         insert_mem_bar(Op_MemBarVolatile);
2720       }
2721     }
2722   }
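  // Taken together with the trailing barriers emitted further down, a volatile
  // unsafe load thus ends up bracketed roughly as
  //   [MemBarVolatile (IRIW CPUs only)]; MemBarCPUOrder; load; MemBarAcquire; MemBarCPUOrder
  // and a volatile unsafe store as
  //   MemBarRelease; MemBarCPUOrder; store; [MemBarVolatile (non-IRIW CPUs)]; MemBarCPUOrder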
2723 
2724   // Memory barrier to prevent normal and 'unsafe' accesses from
2725   // bypassing each other.  Happens after null checks, so the
2726   // exception paths do not take memory state from the memory barrier,
2727   // so there's no problem making a strong assert about mixing users
2728   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2729   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2730   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2731 
2732   if (!is_store) {
2733     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2734     // To be valid, unsafe loads may depend on other conditions than
2735     // the one that guards them: pin the Load node
2736     Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
2737     // load value
2738     switch (type) {
2739     case T_BOOLEAN:
2740     case T_CHAR:
2741     case T_BYTE:
2742     case T_SHORT:
2743     case T_INT:
2744     case T_LONG:
2745     case T_FLOAT:
2746     case T_DOUBLE:
2747       break;
2748     case T_OBJECT:
2749       if (need_read_barrier) {
2750         insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
2751       }
2752       break;
2753     case T_ADDRESS:
2754       // Cast to an int type.
2755       p = _gvn.transform(new (C) CastP2XNode(NULL, p));
2756       p = ConvX2UL(p);
2757       break;
2758     default:
2759       fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2760       break;
2761     }
2762     // The load node has the control of the preceding MemBarCPUOrder.  All
2763     // following nodes will have the control of the MemBarCPUOrder inserted at
2764     // the end of this method.  So, pushing the load onto the stack at a later
2765     // point is fine.
2766     set_result(p);
2767   } else {
2768     // place effect of store into memory
2769     switch (type) {
2770     case T_DOUBLE:
2771       val = dstore_rounding(val);
2772       break;
2773     case T_ADDRESS:
2774       // Repackage the long as a pointer.
2775       val = ConvL2X(val);
2776       val = _gvn.transform(new (C) CastX2PNode(val));
2777       break;
2778     }
2779 
2780     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2781     if (type == T_OBJECT ) {
2782       (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);

2783     } else {
2784       (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2785     }
2786   }
2787 
2788   if (is_volatile) {
2789     if (!is_store) {
2790       insert_mem_bar(Op_MemBarAcquire);

2791     } else {
2792       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2793         insert_mem_bar(Op_MemBarVolatile);

2794       }
2795     }
2796   }
2797 
2798   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2799 
2800   return true;
2801 }
2802 
2803 //----------------------------inline_unsafe_prefetch----------------------------
2804 
2805 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2806 #ifndef PRODUCT
2807   {
2808     ResourceMark rm;
2809     // Check the signatures.
2810     ciSignature* sig = callee()->signature();
2811 #ifdef ASSERT
2812     // Object getObject(Object base, int/long offset), etc.
2813     BasicType rtype = sig->return_type()->basic_type();


2830     null_check_receiver();
2831     if (stopped()) {
2832       return true;
2833     }
2834   }
2835 
2836   // Build address expression.  See the code in inline_unsafe_access.
2837   Node *adr;
2838   if (!is_native_ptr) {
2839     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2840     Node* base   = argument(idx + 0);  // type: oop
2841     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2842     Node* offset = argument(idx + 1);  // type: long
2843     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2844     // to be plain byte offsets, which are also the same as those accepted
2845     // by oopDesc::field_base.
2846     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2847            "fieldOffset must be byte-scaled");
2848     // 32-bit machines ignore the high half!
2849     offset = ConvL2X(offset);
2850     adr = make_unsafe_address(base, offset);
2851   } else {
2852     Node* ptr = argument(idx + 0);  // type: long
2853     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2854     adr = make_unsafe_address(NULL, ptr);
2855   }
2856 
2857   // Generate the read or write prefetch
2858   Node *prefetch;
2859   if (is_store) {
2860     prefetch = new (C) PrefetchWriteNode(i_o(), adr);
2861   } else {
2862     prefetch = new (C) PrefetchReadNode(i_o(), adr);
2863   }
2864   prefetch->init_req(0, control());
2865   set_i_o(_gvn.transform(prefetch));
2866 
2867   return true;
2868 }
2869 
2870 //----------------------------inline_unsafe_load_store----------------------------
2871 // This method serves a couple of different customers (depending on LoadStoreKind):
2872 //
2873 // LS_cmpxchg:
2874 //   public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
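//
// For reference, a typical caller-side retry loop over one of these entry
// points looks like the following Java sketch (int flavour; 'valueOffset' is
// assumed to come from Unsafe.objectFieldOffset, as elsewhere in this file):
//
//   int old;
//   do {
//     old = unsafe.getIntVolatile(holder, valueOffset);
//   } while (!unsafe.compareAndSwapInt(holder, valueOffset, old, old + 1));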


2937     receiver = argument(0);  // type: oop
2938     base     = argument(1);  // type: oop
2939     offset   = argument(2);  // type: long
2940     oldval   = argument(4);  // type: oop, int, or long
2941     newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
2942   } else if (kind == LS_xadd || kind == LS_xchg){
2943     receiver = argument(0);  // type: oop
2944     base     = argument(1);  // type: oop
2945     offset   = argument(2);  // type: long
2946     oldval   = NULL;
2947     newval   = argument(4);  // type: oop, int, or long
2948   }
2949 
2950   // Build field offset expression.
2951   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2952   // to be plain byte offsets, which are also the same as those accepted
2953   // by oopDesc::field_base.
2954   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2955   // 32-bit machines ignore the high half of long offsets
2956   offset = ConvL2X(offset);
2957   Node* adr = make_unsafe_address(base, offset);
2958   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2959 
2960   Compile::AliasType* alias_type = C->alias_type(adr_type);
2961   BasicType bt = alias_type->basic_type();
2962   if (bt != T_ILLEGAL &&
2963       ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
2964     // Don't intrinsify mismatched object accesses.
2965     return false;
2966   }
2967 
2968   // For CAS, unlike inline_unsafe_access, there seems no point in
2969   // trying to refine types. Just use the coarse types here.
2970   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2971   const Type *value_type = Type::get_const_basic_type(type);
2972 
2973   if (kind == LS_xchg && type == T_OBJECT) {
2974     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2975     if (tjp != NULL) {
2976       value_type = tjp;
2977     }
2978   }
2979 
2980   // Null check receiver.
2981   receiver = null_check(receiver);
2982   if (stopped()) {
2983     return true;
2984   }
2985 
2986   int alias_idx = C->get_alias_index(adr_type);
2987 
2988   // Memory-model-wise, a LoadStore acts like a little synchronized
2989   // block, so it needs barriers on each side.  These don't translate
2990   // into actual barriers on most machines, but we still need the rest of
2991   // the compiler to respect ordering.
2992 
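  // The resulting subgraph is therefore bracketed as
  //   MemBarRelease; MemBarCPUOrder; <LoadStore + SCMemProj>; MemBarCPUOrder; MemBarAcquire
  // (the trailing pair is emitted at the end of this method).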
2993   insert_mem_bar(Op_MemBarRelease);
2994   insert_mem_bar(Op_MemBarCPUOrder);
2995 
2996   // 4984716: MemBars must be inserted before this
2997   //          memory node in order to avoid a false
2998   //          dependency which will confuse the scheduler.
2999   Node *mem = memory(alias_idx);
3000 
3001   // For now, we handle only those cases that actually exist: ints,
3002   // longs, and Object. Adding others should be straightforward.
3003   Node* load_store = NULL;
3004   switch(type) {
3005   case T_INT:
3006     if (kind == LS_xadd) {
3007       load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
3008     } else if (kind == LS_xchg) {
3009       load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
3010     } else if (kind == LS_cmpxchg) {
3011       load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
3012     } else {
3013       ShouldNotReachHere();
3014     }
3015     break;
3016   case T_LONG:
3017     if (kind == LS_xadd) {
3018       load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
3019     } else if (kind == LS_xchg) {
3020       load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
3021     } else if (kind == LS_cmpxchg) {
3022       load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
3023     } else {
3024       ShouldNotReachHere();
3025     }
3026     break;
3027   case T_OBJECT:
3028     // Transformation of a value which could be NULL pointer (CastPP #NULL)
3029     // could be delayed during Parse (for example, in adjust_map_after_if()).
3030     // Execute transformation here to avoid barrier generation in such case.
3031     if (_gvn.type(newval) == TypePtr::NULL_PTR)
3032       newval = _gvn.makecon(TypePtr::NULL_PTR);
3033 


3034     // Reference stores need a store barrier.
3035     if (kind == LS_xchg) {
3036       // If pre-barrier must execute before the oop store, old value will require do_load here.
3037       if (!can_move_pre_barrier()) {
3038         pre_barrier(true /* do_load*/,
3039                     control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
3040                     NULL /* pre_val*/,
3041                     T_OBJECT);
3042       } // Else move pre_barrier to use load_store value, see below.
3043     } else if (kind == LS_cmpxchg) {
3044       // Same as for newval above:
3045       if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
3046         oldval = _gvn.makecon(TypePtr::NULL_PTR);
3047       }
3048       // The only known value which might get overwritten is oldval.
3049       pre_barrier(false /* do_load */,
3050                   control(), NULL, NULL, max_juint, NULL, NULL,
3051                   oldval /* pre_val */,
3052                   T_OBJECT);
3053     } else {


3072       if (kind == LS_xchg) {
3073         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
3074       } else {
3075         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
3076         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
3077       }
3078     }
3079     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
3080     break;
3081   default:
3082     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
3083     break;
3084   }
3085 
3086   // SCMemProjNodes represent the memory state of a LoadStore. Their
3087   // main role is to prevent LoadStore nodes from being optimized away
3088   // when their results aren't used.
3089   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
3090   set_memory(proj, alias_idx);
3091 


3092   if (type == T_OBJECT && kind == LS_xchg) {
3093 #ifdef _LP64
3094     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3095       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
3096     }
3097 #endif
3098     if (can_move_pre_barrier()) {
3099       // Don't need to load pre_val. The old value is returned by load_store.
3100       // The pre_barrier can execute after the xchg as long as no safepoint
3101       // gets inserted between them.
3102       pre_barrier(false /* do_load */,
3103                   control(), NULL, NULL, max_juint, NULL, NULL,
3104                   load_store /* pre_val */,
3105                   T_OBJECT);
3106     }
3107   }
3108 
3109   // Add the trailing membar surrounding the access
3110   insert_mem_bar(Op_MemBarCPUOrder);
3111   insert_mem_bar(Op_MemBarAcquire);

3112 
3113   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3114   set_result(load_store);
3115   return true;
3116 }
3117 
3118 //----------------------------inline_unsafe_ordered_store----------------------
3119 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
3120 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
3121 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
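//
// These are the primitives used to implement the j.u.c. "lazySet" operations
// (e.g. AtomicInteger.lazySet); a minimal Java sketch, with 'valueOffset' again
// assumed to come from Unsafe.objectFieldOffset:
//
//   unsafe.putOrderedInt(counter, valueOffset, 42);  // release-ordered store, no trailing fence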
3122 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
3123   // This is another variant of inline_unsafe_access, differing in
3124   // that it always issues a store-store ("release") barrier and ensures
3125   // store-atomicity (which only matters for "long").
3126 
3127   if (callee()->is_static())  return false;  // caller must have the capability!
3128 
3129 #ifndef PRODUCT
3130   {
3131     ResourceMark rm;


3142 #endif //PRODUCT
3143 
3144   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
3145 
3146   // Get arguments:
3147   Node* receiver = argument(0);  // type: oop
3148   Node* base     = argument(1);  // type: oop
3149   Node* offset   = argument(2);  // type: long
3150   Node* val      = argument(4);  // type: oop, int, or long
3151 
3152   // Null check receiver.
3153   receiver = null_check(receiver);
3154   if (stopped()) {
3155     return true;
3156   }
3157 
3158   // Build field offset expression.
3159   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
3160   // 32-bit machines ignore the high half of long offsets
3161   offset = ConvL2X(offset);
3162   Node* adr = make_unsafe_address(base, offset);
3163   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
3164   const Type *value_type = Type::get_const_basic_type(type);
3165   Compile::AliasType* alias_type = C->alias_type(adr_type);
3166 
3167   insert_mem_bar(Op_MemBarRelease);
3168   insert_mem_bar(Op_MemBarCPUOrder);
3169   // Ensure that the store is atomic for longs:
3170   const bool require_atomic_access = true;
3171   Node* store;
3172   if (type == T_OBJECT) // reference stores need a store barrier.

3173     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
3174   else {
3175     store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
3176   }
3177   insert_mem_bar(Op_MemBarCPUOrder);
3178   return true;
3179 }
3180 
3181 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
3182   // Regardless of form, don't allow previous ld/st to move down,
3183   // then issue acquire, release, or volatile mem_bar.
3184   insert_mem_bar(Op_MemBarCPUOrder);
3185   switch(id) {
3186     case vmIntrinsics::_loadFence:
3187       insert_mem_bar(Op_LoadFence);
3188       return true;
3189     case vmIntrinsics::_storeFence:
3190       insert_mem_bar(Op_StoreFence);
3191       return true;
3192     case vmIntrinsics::_fullFence:
3193       insert_mem_bar(Op_MemBarVolatile);
3194       return true;


3332     no_int_result_path   = 1, // t == Thread.current() && !TLS._osthread._interrupted
3333     no_clear_result_path = 2, // t == Thread.current() &&  TLS._osthread._interrupted && !clear_int
3334     slow_result_path     = 3, // slow path: t.isInterrupted(clear_int)
3335     PATH_LIMIT
3336   };
3337 
3338   // Ensure that it's not possible to move the load of TLS._osthread._interrupted flag
3339   // out of the function.
3340   insert_mem_bar(Op_MemBarCPUOrder);
3341 
3342   RegionNode* result_rgn = new (C) RegionNode(PATH_LIMIT);
3343   PhiNode*    result_val = new (C) PhiNode(result_rgn, TypeInt::BOOL);
3344 
3345   RegionNode* slow_region = new (C) RegionNode(1);
3346   record_for_igvn(slow_region);
3347 
3348   // (a) Receiving thread must be the current thread.
3349   Node* rec_thr = argument(0);
3350   Node* tls_ptr = NULL;
3351   Node* cur_thr = generate_current_thread(tls_ptr);


3352   Node* cmp_thr = _gvn.transform(new (C) CmpPNode(cur_thr, rec_thr));
3353   Node* bol_thr = _gvn.transform(new (C) BoolNode(cmp_thr, BoolTest::ne));
3354 
3355   generate_slow_guard(bol_thr, slow_region);
3356 
3357   // (b) Interrupt bit on TLS must be false.
3358   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
3359   Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3360   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
3361 
3362   // Set the control input on the field _interrupted read to prevent it floating up.
3363   Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
3364   Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
3365   Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));
3366 
3367   IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
3368 
3369   // First fast path:  if (!TLS._interrupted) return false;
3370   Node* false_bit = _gvn.transform(new (C) IfFalseNode(iff_bit));
3371   result_rgn->init_req(no_int_result_path, false_bit);


3677   // Pull both arguments off the stack.
3678   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3679   args[0] = argument(0);
3680   args[1] = argument(1);
3681   Node* klasses[2];             // corresponding Klasses: superk, subk
3682   klasses[0] = klasses[1] = top();
3683 
3684   enum {
3685     // A full decision tree on {superc is prim, subc is prim}:
3686     _prim_0_path = 1,           // {P,N} => false
3687                                 // {P,P} & superc!=subc => false
3688     _prim_same_path,            // {P,P} & superc==subc => true
3689     _prim_1_path,               // {N,P} => false
3690     _ref_subtype_path,          // {N,N} & subtype check wins => true
3691     _both_ref_path,             // {N,N} & subtype check loses => false
3692     PATH_LIMIT
3693   };
3694 
3695   RegionNode* region = new (C) RegionNode(PATH_LIMIT);
3696   Node*       phi    = new (C) PhiNode(region, TypeInt::BOOL);

3697   record_for_igvn(region);

3698 
3699   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3700   const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3701   int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
3702 
3703   // First null-check both mirrors and load each mirror's klass metaobject.
3704   int which_arg;
3705   for (which_arg = 0; which_arg <= 1; which_arg++) {
3706     Node* arg = args[which_arg];
3707     arg = null_check(arg);
3708     if (stopped())  break;
3709     args[which_arg] = arg;
3710 
3711     Node* p = basic_plus_adr(arg, class_klass_offset);
3712     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3713     klasses[which_arg] = _gvn.transform(kls);
3714   }
3715 



3716   // Having loaded both klasses, test each for null.
3717   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3718   for (which_arg = 0; which_arg <= 1; which_arg++) {
3719     Node* kls = klasses[which_arg];
3720     Node* null_ctl = top();
3721     kls = null_check_oop(kls, &null_ctl, never_see_null);
3722     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3723     region->init_req(prim_path, null_ctl);
3724     if (stopped())  break;
3725     klasses[which_arg] = kls;
3726   }
3727 
3728   if (!stopped()) {
3729     // now we have two reference types, in klasses[0..1]
3730     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3731     Node* superk = klasses[0];  // the receiver
3732     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3733     // now we have a successful reference subtype check
3734     region->set_req(_ref_subtype_path, control());
3735   }
3736 
3737   // If both operands are primitive (both klasses null), then
3738   // we must return true when they are identical primitives.
3739   // It is convenient to test this after the first null klass check.
3740   set_control(region->in(_prim_0_path)); // go back to first null check
3741   if (!stopped()) {
3742     // Since superc is primitive, make a guard for the superc==subc case.
3743     Node* cmp_eq = _gvn.transform(new (C) CmpPNode(args[0], args[1]));
3744     Node* bol_eq = _gvn.transform(new (C) BoolNode(cmp_eq, BoolTest::eq));
3745     generate_guard(bol_eq, region, PROB_FAIR);
3746     if (region->req() == PATH_LIMIT+1) {
3747       // A guard was added.  If the added guard is taken, superc==subc.
3748       region->swap_edges(PATH_LIMIT, _prim_same_path);
3749       region->del_req(PATH_LIMIT);
3750     }
3751     region->set_req(_prim_0_path, control()); // Not equal after all.
3752   }
3753 
3754   // these are the only paths that produce 'true':
3755   phi->set_req(_prim_same_path,   intcon(1));
3756   phi->set_req(_ref_subtype_path, intcon(1));
3757 
3758   // pull together the cases:
3759   assert(region->req() == PATH_LIMIT, "sane region");

3760   for (uint i = 1; i < region->req(); i++) {
3761     Node* ctl = region->in(i);
3762     if (ctl == NULL || ctl == top()) {
3763       region->set_req(i, top());
3764       phi   ->set_req(i, top());
3765     } else if (phi->in(i) == NULL) {


3766       phi->set_req(i, intcon(0)); // all other paths produce 'false'
3767     }


3768   }
3769 
3770   set_control(_gvn.transform(region));
3771   set_result(_gvn.transform(phi));

3772   return true;
3773 }
3774 
3775 //---------------------generate_array_guard_common------------------------
3776 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3777                                                   bool obj_array, bool not_array) {
3778   // If obj_array/non_array==false/false:
3779   // Branch around if the given klass is in fact an array (either obj or prim).
3780   // If obj_array/non_array==false/true:
3781   // Branch around if the given klass is not an array klass of any kind.
3782   // If obj_array/non_array==true/true:
3783   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3784   // If obj_array/non_array==true/false:
3785   // Branch around if the kls is an oop array (Object[] or subtype)
3786   //
3787   // Like generate_guard, adds a new path onto the region.
3788   jint  layout_con = 0;
3789   Node* layout_val = get_layout_helper(kls, layout_con);
3790   if (layout_val == NULL) {
3791     bool query = (obj_array


3966     // Without this, new_array would throw
3967     // NegativeArraySizeException, but IllegalArgumentException is what
3968     // should be thrown.
3969     generate_negative_guard(length, bailout, &length);
3970 
3971     if (bailout->req() > 1) {
3972       PreserveJVMState pjvms(this);
3973       set_control(_gvn.transform(bailout));
3974       uncommon_trap(Deoptimization::Reason_intrinsic,
3975                     Deoptimization::Action_maybe_recompile);
3976     }
3977 
3978     if (!stopped()) {
3979       // How many elements will we copy from the original?
3980       // The answer is MinI(orig_length - start, length).
3981       Node* orig_tail = _gvn.transform(new (C) SubINode(orig_length, start));
3982       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
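      // For example, copyOfRange(original, 7, 12) on a 10-element array gives
      // start == 7 and length == 5, so moved == MinI(10 - 7, 5) == 3 and the
      // remaining two slots of newcopy keep their default values.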
3983 
3984       newcopy = new_array(klass_node, length, 0);  // no arguments to push
3985 


3986       // Generate a direct call to the right arraycopy function(s).
3987       // We know the copy is disjoint but we might not know if the
3988       // oop stores need checking.
3989       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3990       // This will fail a store-check if x contains any non-nulls.
3991       bool disjoint_bases = true;
3992       // if start > orig_length then the length of the copy may be
3993       // negative.
3994       bool length_never_negative = !is_copyOfRange;
3995       generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
3996                          original, start, newcopy, intcon(0), moved,
3997                          disjoint_bases, length_never_negative);
3998     }
3999   } // original reexecute is set back here
4000 
4001   C->set_has_split_ifs(true); // Has chance for split-if optimization
4002   if (!stopped()) {
4003     set_result(newcopy);
4004   }
4005   return true;


4413 #endif //_LP64
4414 
4415 //----------------------inline_unsafe_copyMemory-------------------------
4416 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4417 bool LibraryCallKit::inline_unsafe_copyMemory() {
4418   if (callee()->is_static())  return false;  // caller must have the capability!
4419   null_check_receiver();  // null-check receiver
4420   if (stopped())  return true;
4421 
4422   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
4423 
4424   Node* src_ptr =         argument(1);   // type: oop
4425   Node* src_off = ConvL2X(argument(2));  // type: long
4426   Node* dst_ptr =         argument(4);   // type: oop
4427   Node* dst_off = ConvL2X(argument(5));  // type: long
4428   Node* size    = ConvL2X(argument(7));  // type: long
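  // Argument slots 1, 2, 4, 5 and 7 reflect the JVM calling convention: each
  // long parameter of copyMemory occupies two slots, and slot 0 holds the
  // Unsafe receiver.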
4429 
4430   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4431          "fieldOffset must be byte-scaled");
4432 
4433   Node* src = make_unsafe_address(src_ptr, src_off);
4434   Node* dst = make_unsafe_address(dst_ptr, dst_off);
4435 
4436   // Conservatively insert a memory barrier on all memory slices.
4437   // Do not let writes of the copy source or destination float below the copy.
4438   insert_mem_bar(Op_MemBarCPUOrder);
4439 
4440   // Call it.  Note that the length argument is not scaled.
4441   make_runtime_call(RC_LEAF|RC_NO_FP,
4442                     OptoRuntime::fast_arraycopy_Type(),
4443                     StubRoutines::unsafe_arraycopy(),
4444                     "unsafe_arraycopy",
4445                     TypeRawPtr::BOTTOM,
4446                     src, dst, size XTOP);
4447 
4448   // Do not let reads of the copy destination float above the copy.
4449   insert_mem_bar(Op_MemBarCPUOrder);
4450 
4451   return true;
4452 }
4453 
4454 //------------------------copy_to_clone-----------------------------------
4455 // Helper function for inline_native_clone.
4456 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
4457   assert(obj_size != NULL, "");
4458   Node* raw_obj = alloc_obj->in(1);
4459   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4460 


4461   AllocateNode* alloc = NULL;
4462   if (ReduceBulkZeroing) {
4463     // We will be completely responsible for initializing this object -
4464     // mark Initialize node as complete.
4465     alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4466     // The object was just allocated - there should not be any stores!
4467     guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4468     // Mark as complete_with_arraycopy so that on AllocateNode
4469     // expansion, we know this AllocateNode is initialized by an array
4470     // copy and a StoreStore barrier exists after the array copy.
4471     alloc->initialization()->set_complete_with_arraycopy();
4472   }
4473 
4474   // Copy the fastest available way.
4475   // TODO: generate field copies for small objects instead.
4476   Node* src  = obj;
4477   Node* dest = alloc_obj;
4478   Node* size = _gvn.transform(obj_size);
4479 
4480   // Exclude the header but include the array length to copy by 8-byte words.


4493     } else {
4494       // Include the klass to copy by 8-byte words.
4495       base_off = instanceOopDesc::klass_offset_in_bytes();
4496     }
4497     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4498   }
4499   src  = basic_plus_adr(src,  base_off);
4500   dest = basic_plus_adr(dest, base_off);
4501 
4502   // Compute the length also, if needed:
4503   Node* countx = size;
4504   countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
4505   countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
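  // i.e. countx = (obj_size - base_off) >> LogBytesPerLong, the number of
  // 8-byte words to copy; for instance a 32-byte object with base_off == 8
  // gives countx == 3.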
4506 
4507   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4508   bool disjoint_bases = true;
4509   generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4510                                src, NULL, dest, NULL, countx,
4511                                /*dest_uninitialized*/true);
4512 









4513   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4514   if (card_mark) {
4515     assert(!is_array, "");
4516     // Put in store barrier for any and all oops we are sticking
4517     // into this object.  (We could avoid this if we could prove
4518     // that the object type contains no oop fields at all.)
4519     Node* no_particular_value = NULL;
4520     Node* no_particular_field = NULL;
4521     int raw_adr_idx = Compile::AliasIdxRaw;
4522     post_barrier(control(),
4523                  memory(raw_adr_type),
4524                  alloc_obj,
4525                  no_particular_field,
4526                  raw_adr_idx,
4527                  no_particular_value,
4528                  T_OBJECT,
4529                  false);
4530   }
4531 
4532   // Do not let reads from the cloned object float above the arraycopy.


4601 
4602     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4603     int raw_adr_idx = Compile::AliasIdxRaw;
4604 
4605     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4606     if (array_ctl != NULL) {
4607       // It's an array.
4608       PreserveJVMState pjvms(this);
4609       set_control(array_ctl);
4610       Node* obj_length = load_array_length(obj);
4611       Node* obj_size  = NULL;
4612       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push
4613 
4614       if (!use_ReduceInitialCardMarks()) {
4615         // If it is an oop array, it requires very special treatment,
4616         // because card marking is required on each card of the array.
4617         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4618         if (is_obja != NULL) {
4619           PreserveJVMState pjvms2(this);
4620           set_control(is_obja);



4621           // Generate a direct call to the right arraycopy function(s).
4622           bool disjoint_bases = true;
4623           bool length_never_negative = true;
4624           generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
4625                              obj, intcon(0), alloc_obj, intcon(0),
4626                              obj_length,
4627                              disjoint_bases, length_never_negative);
4628           result_reg->init_req(_objArray_path, control());
4629           result_val->init_req(_objArray_path, alloc_obj);
4630           result_i_o ->set_req(_objArray_path, i_o());
4631           result_mem ->set_req(_objArray_path, reset_memory());
4632         }
4633       }
4634       // Otherwise, there are no card marks to worry about.
4635       // (We can dispense with card marks if we know the allocation
4636       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4637       //  causes the non-eden paths to take compensating steps to
4638       //  simulate a fresh allocation, so that no further
4639       //  card marks are required in compiled code to initialize
4640       //  the object.)


4818         src_type  = _gvn.type(src);
4819         top_src  = src_type->isa_aryptr();
4820         has_src = (top_src != NULL && top_src->klass() != NULL);
4821         src_spec = true;
4822       }
4823       if (!has_dest) {
4824         dest = maybe_cast_profiled_obj(dest, dest_k);
4825         dest_type  = _gvn.type(dest);
4826         top_dest  = dest_type->isa_aryptr();
4827         has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4828         dest_spec = true;
4829       }
4830     }
4831   }
4832 
4833   if (!has_src || !has_dest) {
4834     // Conservatively insert a memory barrier on all memory slices.
4835     // Do not let writes into the source float below the arraycopy.
4836     insert_mem_bar(Op_MemBarCPUOrder);
4837 



4838     // Call StubRoutines::generic_arraycopy stub.
4839     generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
4840                        src, src_offset, dest, dest_offset, length);
4841 
4842     // Do not let reads from the destination float above the arraycopy.
4843     // Since we cannot type the arrays, we don't know which slices
4844     // might be affected.  We could restrict this barrier only to those
4845     // memory slices which pertain to array elements--but don't bother.
4846     if (!InsertMemBarAfterArraycopy)
4847       // (If InsertMemBarAfterArraycopy, there is already one in place.)
4848       insert_mem_bar(Op_MemBarCPUOrder);
4849     return true;
4850   }
4851 
4852   // (2) src and dest arrays must have elements of the same BasicType
4853   // Figure out the size and type of the elements we will be copying.
4854   BasicType src_elem  =  top_src->klass()->as_array_klass()->element_type()->basic_type();
4855   BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4856   if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
4857   if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
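       // Element types that are themselves arrays are copied as plain oops,
       // so nested arrays take the same path as Object[] copies.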
4858 
4859   if (src_elem != dest_elem || dest_elem == T_VOID) {
4860     // The component types are not the same or are not recognized.  Punt.
4861     // (But, avoid the native method wrapper to JVM_ArrayCopy.)




4862     generate_slow_arraycopy(TypePtr::BOTTOM,
4863                             src, src_offset, dest, dest_offset, length,
4864                             /*dest_uninitialized*/false);
4865     return true;
4866   }
4867 
4868   if (src_elem == T_OBJECT) {
4869     // If both arrays are object arrays then having the exact types
4870     // for both will remove the need for a subtype check at runtime
4871     // before the call and may make it possible to pick a faster copy
4872     // routine (without a subtype check on every element)
4873     // Do we have the exact type of src?
4874     bool could_have_src = src_spec;
4875     // Do we have the exact type of dest?
4876     bool could_have_dest = dest_spec;
4877     ciKlass* src_k = top_src->klass();
4878     ciKlass* dest_k = top_dest->klass();
4879     if (!src_spec) {
4880       src_k = src_type->speculative_type();
4881       if (src_k != NULL && src_k->is_array_klass()) {


4907   // (3) src and dest must not be null.
4908   // (4) src_offset must not be negative.
4909   // (5) dest_offset must not be negative.
4910   // (6) length must not be negative.
4911   // (7) src_offset + length must not exceed length of src.
4912   // (8) dest_offset + length must not exceed length of dest.
4913   // (9) each element of an oop array must be assignable
4914 
4915   RegionNode* slow_region = new (C) RegionNode(1);
4916   record_for_igvn(slow_region);
4917 
4918   // (3) operands must not be null
4919   // We currently perform our null checks with the null_check routine.
4920   // This means that the null exceptions will be reported in the caller
4921   // rather than (correctly) reported inside of the native arraycopy call.
4922   // This should be corrected, given time.  We do our null check with the
4923   // stack pointer restored.
4924   src  = null_check(src,  T_ARRAY);
4925   dest = null_check(dest, T_ARRAY);
4926 



4927   // (4) src_offset must not be negative.
4928   generate_negative_guard(src_offset, slow_region);
4929 
4930   // (5) dest_offset must not be negative.
4931   generate_negative_guard(dest_offset, slow_region);
4932 
4933   // (6) length must not be negative (moved to generate_arraycopy()).
4934   // generate_negative_guard(length, slow_region);
4935 
4936   // (7) src_offset + length must not exceed length of src.
4937   generate_limit_guard(src_offset, length,
4938                        load_array_length(src),
4939                        slow_region);
4940 
4941   // (8) dest_offset + length must not exceed length of dest.
4942   generate_limit_guard(dest_offset, length,
4943                        load_array_length(dest),
4944                        slow_region);
4945 
4946   // (9) each element of an oop array must be assignable


5223       PreserveJVMState pjvms(this);
5224       set_control(not_subtype_ctrl);
5225       // (At this point we can assume disjoint_bases, since types differ.)
5226       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
5227       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
5228       Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
5229       Node* dest_elem_klass = _gvn.transform(n1);
5230       Node* cv = generate_checkcast_arraycopy(adr_type,
5231                                               dest_elem_klass,
5232                                               src, src_offset, dest, dest_offset,
5233                                               ConvI2X(copy_length), dest_uninitialized);
5234       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
5235       checked_control = control();
5236       checked_i_o     = i_o();
5237       checked_mem     = memory(adr_type);
5238       checked_value   = cv;
5239     }
5240     // At this point we know we do not need type checks on oop stores.
5241 
5242     // Let's see if we need card marks:
5243     if (alloc != NULL && use_ReduceInitialCardMarks()) {
5244       // If we do not need card marks, copy using the jint or jlong stub.
5245       copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
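           // With compressed oops each element is 32 bits wide, so the jint stub
           // matches the element size; otherwise 64-bit oops need the jlong stub.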
5246       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
5247              "sizes agree");
5248     }
5249   }
5250 
5251   if (!stopped()) {
5252     // Generate the fast path, if possible.
5253     PreserveJVMState pjvms(this);
5254     generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
5255                                  src, src_offset, dest, dest_offset,
5256                                  ConvI2X(copy_length), dest_uninitialized);
5257 
5258     // Present the results of the fast call.
5259     result_region->init_req(fast_path, control());
5260     result_i_o   ->init_req(fast_path, i_o());
5261     result_memory->init_req(fast_path, memory(adr_type));
5262   }
5263 


5375     // a subsequent store that would make this object accessible by
5376     // other threads.
5377     // Record what AllocateNode this StoreStore protects so that
5378     // escape analysis can go from the MemBarStoreStoreNode to the
5379     // AllocateNode and eliminate the MemBarStoreStoreNode if possible
5380     // based on the escape status of the AllocateNode.
5381     insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
5382   } else if (InsertMemBarAfterArraycopy)
5383     insert_mem_bar(Op_MemBarCPUOrder);
5384 }
5385 
5386 
5387 // Helper function which determines if an arraycopy immediately follows
5388 // an allocation, with no intervening tests or other escapes for the object.
5389 AllocateArrayNode*
5390 LibraryCallKit::tightly_coupled_allocation(Node* ptr,
5391                                            RegionNode* slow_region) {
5392   if (stopped())             return NULL;  // no fast path
5393   if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
5394 


5395   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
5396   if (alloc == NULL)  return NULL;
5397 
5398   Node* rawmem = memory(Compile::AliasIdxRaw);
5399   // Is the allocation's memory state untouched?
5400   if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
5401     // Bail out if there have been raw-memory effects since the allocation.
5402     // (Example:  There might have been a call or safepoint.)
5403     return NULL;
5404   }
5405   rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
5406   if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
5407     return NULL;
5408   }
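       // The raw memory chain is now: current state -> Initialize -> this allocation,
       // so nothing has touched raw memory since the array was allocated.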
5409 
5410   // There must be no unexpected observers of this allocation.
5411   for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
5412     Node* obs = ptr->fast_out(i);
5413     if (obs != this->map()) {
5414       return NULL;


5753                           disjoint_bases, copyfunc_name, dest_uninitialized);
5754 
5755   // Call it.  Note that the count_ix value is not scaled to a byte-size.
5756   make_runtime_call(RC_LEAF|RC_NO_FP,
5757                     OptoRuntime::fast_arraycopy_Type(),
5758                     copyfunc_addr, copyfunc_name, adr_type,
5759                     src_start, dest_start, copy_length XTOP);
5760 }
5761 
5762 //-------------inline_encodeISOArray-----------------------------------
5763 // encode char[] to byte[] in ISO_8859_1
5764 bool LibraryCallKit::inline_encodeISOArray() {
5765   assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
5766   // no receiver since it is a static method
5767   Node *src         = argument(0);
5768   Node *src_offset  = argument(1);
5769   Node *dst         = argument(2);
5770   Node *dst_offset  = argument(3);
5771   Node *length      = argument(4);
5772 






5773   const Type* src_type = src->Value(&_gvn);
5774   const Type* dst_type = dst->Value(&_gvn);
5775   const TypeAryPtr* top_src = src_type->isa_aryptr();
5776   const TypeAryPtr* top_dest = dst_type->isa_aryptr();
5777   if (top_src  == NULL || top_src->klass()  == NULL ||
5778       top_dest == NULL || top_dest->klass() == NULL) {
5779     // failed array check
5780     return false;
5781   }
5782 
5783   // Figure out the size and type of the elements we will be copying.
5784   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5785   BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5786   if (src_elem != T_CHAR || dst_elem != T_BYTE) {
5787     return false;
5788   }
5789   Node* src_start = array_element_address(src, src_offset, src_elem);
5790   Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
5791   // 'src_start' points to src array + scaled offset
5792   // 'dst_start' points to dst array + scaled offset


5802 
5803 //-------------inline_multiplyToLen-----------------------------------
5804 bool LibraryCallKit::inline_multiplyToLen() {
5805   assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
5806 
5807   address stubAddr = StubRoutines::multiplyToLen();
5808   if (stubAddr == NULL) {
5809     return false; // Intrinsic's stub is not implemented on this platform
5810   }
5811   const char* stubName = "multiplyToLen";
5812 
5813   assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
5814 
5815   // no receiver because it is a static method
5816   Node* x    = argument(0);
5817   Node* xlen = argument(1);
5818   Node* y    = argument(2);
5819   Node* ylen = argument(3);
5820   Node* z    = argument(4);
5821 






5822   const Type* x_type = x->Value(&_gvn);
5823   const Type* y_type = y->Value(&_gvn);
5824   const TypeAryPtr* top_x = x_type->isa_aryptr();
5825   const TypeAryPtr* top_y = y_type->isa_aryptr();
5826   if (top_x  == NULL || top_x->klass()  == NULL ||
5827       top_y == NULL || top_y->klass() == NULL) {
5828     // failed array check
5829     return false;
5830   }
5831 
5832   BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5833   BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5834   if (x_elem != T_INT || y_elem != T_INT) {
5835     return false;
5836   }
5837 
5838   // Set the original stack and the reexecute bit for the interpreter to reexecute
5839   // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
5840   // on the return from z array allocation in runtime.
5841   { PreserveReexecuteState preexecs(this);


5847     // 'y_start' points to y array + scaled ylen
5848 
5849     // Allocate the result array
5850     Node* zlen = _gvn.transform(new(C) AddINode(xlen, ylen));
5851     ciKlass* klass = ciTypeArrayKlass::make(T_INT);
5852     Node* klass_node = makecon(TypeKlassPtr::make(klass));
5853 
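         // The IdealKit section below allocates a fresh int[xlen + ylen] result array
         // when the caller-supplied z is null or shorter than xlen + ylen; otherwise
         // the existing z is reused.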
5854     IdealKit ideal(this);
5855 
5856 #define __ ideal.
5857      Node* one = __ ConI(1);
5858      Node* zero = __ ConI(0);
5859      IdealVariable need_alloc(ideal), z_alloc(ideal);  __ declarations_done();
5860      __ set(need_alloc, zero);
5861      __ set(z_alloc, z);
5862      __ if_then(z, BoolTest::eq, null()); {
5863        __ increment (need_alloc, one);
5864      } __ else_(); {
5865        // Update graphKit memory and control from IdealKit.
5866        sync_kit(ideal);
5867        Node* zlen_arg = load_array_length(z);









5868        // Update IdealKit memory and control from graphKit.
5869        __ sync_kit(this);
5870        __ if_then(zlen_arg, BoolTest::lt, zlen); {
5871          __ increment (need_alloc, one);
5872        } __ end_if();
5873      } __ end_if();
5874 
5875      __ if_then(__ value(need_alloc), BoolTest::ne, zero); {
5876        // Update graphKit memory and control from IdealKit.
5877        sync_kit(ideal);
5878        Node * narr = new_array(klass_node, zlen, 1);
5879        // Update IdealKit memory and control from graphKit.
5880        __ sync_kit(this);
5881        __ set(z_alloc, narr);
5882      } __ end_if();
5883 
5884      sync_kit(ideal);
5885      z = __ value(z_alloc);
5886      // Can't use TypeAryPtr::INTS which uses Bottom offset.
5887      _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));


5902   return true;
5903 }
5904 
5905 //-------------inline_squareToLen------------------------------------
5906 bool LibraryCallKit::inline_squareToLen() {
5907   assert(UseSquareToLenIntrinsic, "not implemented on this platform");
5908 
5909   address stubAddr = StubRoutines::squareToLen();
5910   if (stubAddr == NULL) {
5911     return false; // Intrinsic's stub is not implemented on this platform
5912   }
5913   const char* stubName = "squareToLen";
5914 
5915   assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
5916 
5917   Node* x    = argument(0);
5918   Node* len  = argument(1);
5919   Node* z    = argument(2);
5920   Node* zlen = argument(3);
5921 





5922   const Type* x_type = x->Value(&_gvn);
5923   const Type* z_type = z->Value(&_gvn);
5924   const TypeAryPtr* top_x = x_type->isa_aryptr();
5925   const TypeAryPtr* top_z = z_type->isa_aryptr();
5926   if (top_x  == NULL || top_x->klass()  == NULL ||
5927       top_z  == NULL || top_z->klass()  == NULL) {
5928     // failed array check
5929     return false;
5930   }
5931 
5932   BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5933   BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5934   if (x_elem != T_INT || z_elem != T_INT) {
5935     return false;
5936   }
5937 
5938 
5939   Node* x_start = array_element_address(x, intcon(0), x_elem);
5940   Node* z_start = array_element_address(z, intcon(0), z_elem);
5941 


5949 }
5950 
5951 //-------------inline_mulAdd------------------------------------------
5952 bool LibraryCallKit::inline_mulAdd() {
5953   assert(UseMulAddIntrinsic, "not implemented on this platform");
5954 
5955   address stubAddr = StubRoutines::mulAdd();
5956   if (stubAddr == NULL) {
5957     return false; // Intrinsic's stub is not implemented on this platform
5958   }
5959   const char* stubName = "mulAdd";
5960 
5961   assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
5962 
5963   Node* out      = argument(0);
5964   Node* in       = argument(1);
5965   Node* offset   = argument(2);
5966   Node* len      = argument(3);
5967   Node* k        = argument(4);
5968 




5969   const Type* out_type = out->Value(&_gvn);
5970   const Type* in_type = in->Value(&_gvn);
5971   const TypeAryPtr* top_out = out_type->isa_aryptr();
5972   const TypeAryPtr* top_in = in_type->isa_aryptr();
5973   if (top_out  == NULL || top_out->klass()  == NULL ||
5974       top_in == NULL || top_in->klass() == NULL) {
5975     // failed array check
5976     return false;
5977   }
5978 
5979   BasicType out_elem = out_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5980   BasicType in_elem = in_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5981   if (out_elem != T_INT || in_elem != T_INT) {
5982     return false;
5983   }
5984 
5985   Node* outlen = load_array_length(out);
5986   Node* new_offset = _gvn.transform(new (C) SubINode(outlen, offset));
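       // Rebase the Java-level offset against the output length; the stub is passed
       // out.length - offset rather than the offset itself.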
5987   Node* out_start = array_element_address(out, intcon(0), out_elem);
5988   Node* in_start = array_element_address(in, intcon(0), in_elem);


5998 
5999 //-------------inline_montgomeryMultiply-----------------------------------
6000 bool LibraryCallKit::inline_montgomeryMultiply() {
6001   address stubAddr = StubRoutines::montgomeryMultiply();
6002   if (stubAddr == NULL) {
6003     return false; // Intrinsic's stub is not implemented on this platform
6004   }
6005 
6006   assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
6007   const char* stubName = "montgomery_multiply";
6008 
6009   assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
6010 
6011   Node* a    = argument(0);
6012   Node* b    = argument(1);
6013   Node* n    = argument(2);
6014   Node* len  = argument(3);
6015   Node* inv  = argument(4);
6016   Node* m    = argument(6);
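       // 'inv' is a jlong and occupies two argument slots (4 and 5), so 'm' is at slot 6.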
6017 





6018   const Type* a_type = a->Value(&_gvn);
6019   const TypeAryPtr* top_a = a_type->isa_aryptr();
6020   const Type* b_type = b->Value(&_gvn);
6021   const TypeAryPtr* top_b = b_type->isa_aryptr();
6022   const Type* n_type = n->Value(&_gvn);
6023   const TypeAryPtr* top_n = n_type->isa_aryptr();
6024   const Type* m_type = m->Value(&_gvn);
6025   const TypeAryPtr* top_m = m_type->isa_aryptr();
6026   if (top_a  == NULL || top_a->klass()  == NULL ||
6027       top_b == NULL || top_b->klass()  == NULL ||
6028       top_n == NULL || top_n->klass()  == NULL ||
6029       top_m == NULL || top_m->klass()  == NULL) {
6030     // failed array check
6031     return false;
6032   }
6033 
6034   BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6035   BasicType b_elem = b_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6036   BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6037   BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();


6067   return true;
6068 }
6069 
6070 bool LibraryCallKit::inline_montgomerySquare() {
6071   address stubAddr = StubRoutines::montgomerySquare();
6072   if (stubAddr == NULL) {
6073     return false; // Intrinsic's stub is not implemented on this platform
6074   }
6075 
6076   assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
6077   const char* stubName = "montgomery_square";
6078 
6079   assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");
6080 
6081   Node* a    = argument(0);
6082   Node* n    = argument(1);
6083   Node* len  = argument(2);
6084   Node* inv  = argument(3);
6085   Node* m    = argument(5);
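       // 'inv' is a jlong and occupies two argument slots (3 and 4), so 'm' is at slot 5.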
6086 




6087   const Type* a_type = a->Value(&_gvn);
6088   const TypeAryPtr* top_a = a_type->isa_aryptr();
6089   const Type* n_type = n->Value(&_gvn);
6090   const TypeAryPtr* top_n = n_type->isa_aryptr();
6091   const Type* m_type = m->Value(&_gvn);
6092   const TypeAryPtr* top_m = m_type->isa_aryptr();
6093   if (top_a  == NULL || top_a->klass()  == NULL ||
6094       top_n == NULL || top_n->klass()  == NULL ||
6095       top_m == NULL || top_m->klass()  == NULL) {
6096     // failed array check
6097     return false;
6098   }
6099 
6100   BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6101   BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6102   BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6103   if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
6104     return false;
6105   }
6106 


6178   // no receiver since it is a static method
6179   Node* crc     = argument(0); // type: int
6180   Node* src     = argument(1); // type: oop
6181   Node* offset  = argument(2); // type: int
6182   Node* length  = argument(3); // type: int
6183 
6184   const Type* src_type = src->Value(&_gvn);
6185   const TypeAryPtr* top_src = src_type->isa_aryptr();
6186   if (top_src  == NULL || top_src->klass()  == NULL) {
6187     // failed array check
6188     return false;
6189   }
6190 
6191   // Figure out the size and type of the elements we will be copying.
6192   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6193   if (src_elem != T_BYTE) {
6194     return false;
6195   }
6196 
6197   // 'src_start' points to src array + scaled offset



6198   Node* src_start = array_element_address(src, offset, src_elem);
6199 
6200   // We assume that the range check is done by the caller.
6201   // TODO: generate range check (offset+length < src.length) in debug VM.
6202 
6203   // Call the stub.
6204   address stubAddr = StubRoutines::updateBytesCRC32();
6205   const char *stubName = "updateBytesCRC32";
6206   Node* call;
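       // On targets whose C calling convention passes ints as 64-bit values, each int
       // argument occupies two slots; XTOP supplies top() for the unused upper half.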
6207   if (CCallingConventionRequiresIntsAsLongs) {
6208     call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
6209                              stubAddr, stubName, TypePtr::BOTTOM,
6210                              crc XTOP, src_start, length XTOP);
6211   } else {
6212     call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
6213                              stubAddr, stubName, TypePtr::BOTTOM,
6214                              crc, src_start, length);
6215   }
6216   Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
6217   set_result(result);


6287 
6288   set_result(result);
6289   return true;
6290 }
6291 
6292 
6293 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6294                                               bool is_exact=true, bool is_static=false) {
6295 
6296   const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
6297   assert(tinst != NULL, "obj is null");
6298   assert(tinst->klass()->is_loaded(), "obj is not loaded");
6299   assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
6300 
6301   ciField* field = tinst->klass()->as_instance_klass()->get_field_by_name(ciSymbol::make(fieldName),
6302                                                                           ciSymbol::make(fieldTypeString),
6303                                                                           is_static);
6304   if (field == NULL) return (Node *) NULL;
6305   assert (field != NULL, "undefined field");
6306 








6307   // The code below is copied from Parse::do_get_xxx():
6308 
6309   // Compute address and memory type.
6310   int offset  = field->offset_in_bytes();
6311   bool is_vol = field->is_volatile();
6312   ciType* field_klass = field->type();
6313   assert(field_klass->is_loaded(), "should be loaded");
6314   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6315   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6316   BasicType bt = field->layout_type();
6317 
6318   // Build the resultant type of the load
6319   const Type *type;
6320   if (bt == T_OBJECT) {
6321     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6322   } else {
6323     type = Type::get_const_basic_type(bt);
6324   }
6325 

6326   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6327     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6328   }
6329   // Build the load.
6330   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6331   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
6332   // If reference is volatile, prevent following memory ops from
6333   // floating up past the volatile read.  Also prevents commoning
6334   // another volatile read.
6335   if (is_vol) {
6336     // Memory barrier includes bogus read of value to force load BEFORE membar
6337     insert_mem_bar(Op_MemBarAcquire, loadedField);

6338   }
6339   return loadedField;
6340 }
6341 
6342 
6343 //------------------------------inline_aescrypt_Block-----------------------
6344 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6345   address stubAddr = NULL;
6346   const char *stubName;
6347   assert(UseAES, "need AES instruction support");
6348 
6349   switch(id) {
6350   case vmIntrinsics::_aescrypt_encryptBlock:
6351     stubAddr = StubRoutines::aescrypt_encryptBlock();
6352     stubName = "aescrypt_encryptBlock";
6353     break;
6354   case vmIntrinsics::_aescrypt_decryptBlock:
6355     stubAddr = StubRoutines::aescrypt_decryptBlock();
6356     stubName = "aescrypt_decryptBlock";
6357     break;
6358   }
6359   if (stubAddr == NULL) return false;
6360 
6361   Node* aescrypt_object = argument(0);
6362   Node* src             = argument(1);
6363   Node* src_offset      = argument(2);
6364   Node* dest            = argument(3);
6365   Node* dest_offset     = argument(4);
6366 






6367   // (1) src and dest are arrays.
6368   const Type* src_type = src->Value(&_gvn);
6369   const Type* dest_type = dest->Value(&_gvn);
6370   const TypeAryPtr* top_src = src_type->isa_aryptr();
6371   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
6372   assert (top_src  != NULL && top_src->klass()  != NULL &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
6373 
6374   // for the quick and dirty code we will skip all the checks.
6375   // we are just trying to get the call to be generated.
6376   Node* src_start  = src;
6377   Node* dest_start = dest;
6378   if (src_offset != NULL || dest_offset != NULL) {
6379     assert(src_offset != NULL && dest_offset != NULL, "");
6380     src_start  = array_element_address(src,  src_offset,  T_BYTE);
6381     dest_start = array_element_address(dest, dest_offset, T_BYTE);
6382   }
6383 
6384   // now need to get the start of its expanded key array
6385   // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
6386   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);


6415 
6416   switch(id) {
6417   case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
6418     stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
6419     stubName = "cipherBlockChaining_encryptAESCrypt";
6420     break;
6421   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
6422     stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
6423     stubName = "cipherBlockChaining_decryptAESCrypt";
6424     break;
6425   }
6426   if (stubAddr == NULL) return false;
6427 
6428   Node* cipherBlockChaining_object = argument(0);
6429   Node* src                        = argument(1);
6430   Node* src_offset                 = argument(2);
6431   Node* len                        = argument(3);
6432   Node* dest                       = argument(4);
6433   Node* dest_offset                = argument(5);
6434 







6435   // (1) src and dest are arrays.
6436   const Type* src_type = src->Value(&_gvn);
6437   const Type* dest_type = dest->Value(&_gvn);
6438   const TypeAryPtr* top_src = src_type->isa_aryptr();
6439   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
6440   assert (top_src  != NULL && top_src->klass()  != NULL
6441           &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
6442 
6443   // checks are the responsibility of the caller
6444   Node* src_start  = src;
6445   Node* dest_start = dest;
6446   if (src_offset != NULL || dest_offset != NULL) {
6447     assert(src_offset != NULL && dest_offset != NULL, "");
6448     src_start  = array_element_address(src,  src_offset,  T_BYTE);
6449     dest_start = array_element_address(dest, dest_offset, T_BYTE);
6450   }
6451 
6452   // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
6453   // (because of the predicated logic executed earlier).
6454   // so we cast it here safely.


6459 
6460   // cast it to what we know it will be at runtime
6461   const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
6462   assert(tinst != NULL, "CBC obj is null");
6463   assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
6464   ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6465   assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
6466 
6467   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6468   const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
6469   const TypeOopPtr* xtype = aklass->as_instance_type();
6470   Node* aescrypt_object = new(C) CheckCastPPNode(control(), embeddedCipherObj, xtype);
6471   aescrypt_object = _gvn.transform(aescrypt_object);
6472 
6473   // we need to get the start of the aescrypt_object's expanded key array
6474   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6475   if (k_start == NULL) return false;
6476 
6477   // similarly, get the start address of the r vector
6478   Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);



6479   if (objRvec == NULL) return false;
6480   Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
6481 
6482   Node* cbcCrypt;
6483   if (Matcher::pass_original_key_for_aes()) {
6484     // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
6485     // compatibility issues between Java key expansion and SPARC crypto instructions
6486     Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
6487     if (original_k_start == NULL) return false;
6488 
6489     // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
6490     cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6491                                  OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6492                                  stubAddr, stubName, TypePtr::BOTTOM,
6493                                  src_start, dest_start, k_start, r_start, len, original_k_start);
6494   } else {
6495     // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
6496     cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6497                                  OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6498                                  stubAddr, stubName, TypePtr::BOTTOM,


6507 
6508 //------------------------------get_key_start_from_aescrypt_object-----------------------
6509 Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
6510 #ifdef PPC64
6511   // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys.
6512   // Intel's extension is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns.
6513   // However, ppc64 vncipher processes MixColumns and requires the same round keys as encryption.
6514   // The ppc64 stubs of encryption and decryption use the same round keys (sessionK[0]).
6515   Node* objSessionK = load_field_from_object(aescrypt_object, "sessionK", "[[I", /*is_exact*/ false);
6516   assert (objSessionK != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6517   if (objSessionK == NULL) {
6518     return (Node *) NULL;
6519   }
6520   Node* objAESCryptKey = load_array_element(control(), objSessionK, intcon(0), TypeAryPtr::OOPS);
6521 #else
6522   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
6523 #endif // PPC64
6524   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6525   if (objAESCryptKey == NULL) return (Node *) NULL;
6526 


6527   // now have the array, need to get the start address of the K array
6528   Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
6529   return k_start;
6530 }
6531 
6532 //------------------------------get_original_key_start_from_aescrypt_object-----------------------
6533 Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
6534   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
6535   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6536   if (objAESCryptKey == NULL) return (Node *) NULL;
6537 


6538   // now have the array, need to get the start address of the lastKey array
6539   Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
6540   return original_k_start;
6541 }
6542 
6543 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
6544 // Return node representing slow path of predicate check.
6545 // the pseudo code we want to emulate with this predicate is:
6546 // for encryption:
6547 //    if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6548 // for decryption:
6549 //    if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6550 //    note cipher==plain is more conservative than the original java code but that's OK
6551 //
6552 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
6553   // The receiver was checked for NULL already.
6554   Node* objCBC = argument(0);
6555 



6556   // Load embeddedCipher field of CipherBlockChaining object.
6557   Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6558 
6559   // get AESCrypt klass for instanceOf check
6560   // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point;
6561   // it will have the same classloader as the CipherBlockChaining object
6562   const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
6563   assert(tinst != NULL, "CBCobj is null");
6564   assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");
6565 
6566   // we want to do an instanceof comparison against the AESCrypt class
6567   ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6568   if (!klass_AESCrypt->is_loaded()) {
6569     // if AESCrypt is not even loaded, we never take the intrinsic fast path
6570     Node* ctrl = control();
6571     set_control(top()); // no regular fast path
6572     return ctrl;
6573   }









6574   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6575 
6576   Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6577   Node* cmp_instof  = _gvn.transform(new (C) CmpINode(instof, intcon(1)));
6578   Node* bool_instof  = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));
6579 
6580   Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
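       // instof_false is the control path taken when embeddedCipherObj is NOT an
       // AESCrypt instance; that path must fall back to the Java implementation.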
6581 
6582   // for encryption, we are done
6583   if (!decrypting)
6584     return instof_false;  // even if it is NULL
6585 
6586   // for decryption, we need to add a further check to avoid
6587   // taking the intrinsic path when cipher and plain are the same
6588   // see the original java code for why.
6589   RegionNode* region = new(C) RegionNode(3);
6590   region->init_req(1, instof_false);
6591   Node* src = argument(1);
6592   Node* dest = argument(4);
6593   Node* cmp_src_dest = _gvn.transform(new (C) CmpPNode(src, dest));
6594   Node* bool_src_dest = _gvn.transform(new (C) BoolNode(cmp_src_dest, BoolTest::eq));
6595   Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
6596   region->init_req(2, src_dest_conjoint);
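       // The region merges the two slow-path conditions: the embedded cipher is not
       // an AESCrypt instance, or src and dest alias (cipher == plain).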
6597 
6598   record_for_igvn(region);
6599   return _gvn.transform(region);
6600 }
6601 
6602 //------------------------------inline_sha_implCompress-----------------------
6603 //
6604 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
6605 // void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
6606 //
6607 // Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
6608 // void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
6609 //
6610 // Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
6611 // void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
6612 //
6613 bool LibraryCallKit::inline_sha_implCompress(vmIntrinsics::ID id) {
6614   assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
6615 
6616   Node* sha_obj = argument(0);
6617   Node* src     = argument(1); // type oop
6618   Node* ofs     = argument(2); // type int
6619 
6620   const Type* src_type = src->Value(&_gvn);
6621   const TypeAryPtr* top_src = src_type->isa_aryptr();
6622   if (top_src  == NULL || top_src->klass()  == NULL) {
6623     // failed array check
6624     return false;
6625   }
6626   // Figure out the size and type of the elements we will be copying.
6627   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6628   if (src_elem != T_BYTE) {
6629     return false;
6630   }
6631   // 'src_start' points to src array + offset


6632   Node* src_start = array_element_address(src, ofs, src_elem);
6633   Node* state = NULL;
6634   address stubAddr;
6635   const char *stubName;
6636 
6637   switch(id) {
6638   case vmIntrinsics::_sha_implCompress:
6639     assert(UseSHA1Intrinsics, "need SHA1 instruction support");
6640     state = get_state_from_sha_object(sha_obj);
6641     stubAddr = StubRoutines::sha1_implCompress();
6642     stubName = "sha1_implCompress";
6643     break;
6644   case vmIntrinsics::_sha2_implCompress:
6645     assert(UseSHA256Intrinsics, "need SHA256 instruction support");
6646     state = get_state_from_sha_object(sha_obj);
6647     stubAddr = StubRoutines::sha256_implCompress();
6648     stubName = "sha256_implCompress";
6649     break;
6650   case vmIntrinsics::_sha5_implCompress:
6651     assert(UseSHA512Intrinsics, "need SHA512 instruction support");


6678   assert((uint)predicate < 3, "sanity");
6679   assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");
6680 
6681   Node* digestBase_obj = argument(0); // The receiver was checked for NULL already.
6682   Node* src            = argument(1); // byte[] array
6683   Node* ofs            = argument(2); // type int
6684   Node* limit          = argument(3); // type int
6685 
6686   const Type* src_type = src->Value(&_gvn);
6687   const TypeAryPtr* top_src = src_type->isa_aryptr();
6688   if (top_src  == NULL || top_src->klass()  == NULL) {
6689     // failed array check
6690     return false;
6691   }
6692   // Figure out the size and type of the elements we will be copying.
6693   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6694   if (src_elem != T_BYTE) {
6695     return false;
6696   }
6697   // 'src_start' points to src array + offset


6698   Node* src_start = array_element_address(src, ofs, src_elem);
6699 
6700   const char* klass_SHA_name = NULL;
6701   const char* stub_name = NULL;
6702   address     stub_addr = NULL;
6703   bool        long_state = false;
6704 
6705   switch (predicate) {
6706   case 0:
6707     if (UseSHA1Intrinsics) {
6708       klass_SHA_name = "sun/security/provider/SHA";
6709       stub_name = "sha1_implCompressMB";
6710       stub_addr = StubRoutines::sha1_implCompressMB();
6711     }
6712     break;
6713   case 1:
6714     if (UseSHA256Intrinsics) {
6715       klass_SHA_name = "sun/security/provider/SHA2";
6716       stub_name = "sha256_implCompressMB";
6717       stub_addr = StubRoutines::sha256_implCompressMB();


6767                              src_start, state, ofs XTOP, limit XTOP);
6768   } else {
6769     call = make_runtime_call(RC_LEAF|RC_NO_FP,
6770                              OptoRuntime::digestBase_implCompressMB_Type(),
6771                              stubAddr, stubName, TypePtr::BOTTOM,
6772                              src_start, state, ofs, limit);
6773   }
6774   // return ofs (int)
6775   Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
6776   set_result(result);
6777 
6778   return true;
6779 }
6780 
6781 //------------------------------get_state_from_sha_object-----------------------
6782 Node * LibraryCallKit::get_state_from_sha_object(Node *sha_object) {
6783   Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
6784   assert (sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
6785   if (sha_state == NULL) return (Node *) NULL;
6786 


6787   // now have the array, need to get the start address of the state array
6788   Node* state = array_element_address(sha_state, intcon(0), T_INT);
6789   return state;
6790 }
6791 
6792 //------------------------------get_state_from_sha5_object-----------------------
6793 Node * LibraryCallKit::get_state_from_sha5_object(Node *sha_object) {
6794   Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
6795   assert (sha_state != NULL, "wrong version of sun.security.provider.SHA5");
6796   if (sha_state == NULL) return (Node *) NULL;


6797 
6798   // now have the array, need to get the start address of the state array
6799   Node* state = array_element_address(sha_state, intcon(0), T_LONG);
6800   return state;
6801 }
6802 
6803 //----------------------------inline_digestBase_implCompressMB_predicate----------------------------
6804 // Return node representing slow path of predicate check.
6805 // the pseudo code we want to emulate with this predicate is:
6806 //    if (digestBaseObj instanceof SHA/SHA2/SHA5) do_intrinsic, else do_javapath
6807 //
6808 Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
6809   assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
6810          "need SHA1/SHA256/SHA512 instruction support");
6811   assert((uint)predicate < 3, "sanity");
6812 
6813   // The receiver was checked for NULL already.
6814   Node* digestBaseObj = argument(0);
6815 
6816   // get DigestBase klass for instanceOf check




  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compileBroker.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/callGenerator.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/idealKit.hpp"
  36 #include "opto/mathexactnode.hpp"
  37 #include "opto/mulnode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/runtime.hpp"
  40 #include "opto/shenandoahSupport.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "prims/nativeLookup.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "trace/traceMacros.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 
  48 class LibraryIntrinsic : public InlineCallGenerator {
  49   // Extend the set of intrinsics known to the runtime:
  50  public:
  51  private:
  52   bool             _is_virtual;
  53   bool             _does_virtual_dispatch;
  54   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  55   int8_t           _last_predicate; // Last generated predicate
  56   vmIntrinsics::ID _intrinsic_id;
  57 
  58  public:
  59   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  60     : InlineCallGenerator(m),
  61       _is_virtual(is_virtual),
  62       _does_virtual_dispatch(does_virtual_dispatch),
  63       _predicates_count((int8_t)predicates_count),
  64       _last_predicate((int8_t)-1),
  65       _intrinsic_id(id)
  66   {


 211   bool inline_trig(vmIntrinsics::ID id);
 212   bool inline_math(vmIntrinsics::ID id);
 213   template <typename OverflowOp>
 214   bool inline_math_overflow(Node* arg1, Node* arg2);
 215   void inline_math_mathExact(Node* math, Node* test);
 216   bool inline_math_addExactI(bool is_increment);
 217   bool inline_math_addExactL(bool is_increment);
 218   bool inline_math_multiplyExactI();
 219   bool inline_math_multiplyExactL();
 220   bool inline_math_negateExactI();
 221   bool inline_math_negateExactL();
 222   bool inline_math_subtractExactI(bool is_decrement);
 223   bool inline_math_subtractExactL(bool is_decrement);
 224   bool inline_exp();
 225   bool inline_pow();
 226   Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
 227   bool inline_min_max(vmIntrinsics::ID id);
 228   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
 229   // This returns Type::AnyPtr, RawPtr, or OopPtr.
 230   int classify_unsafe_addr(Node* &base, Node* &offset);
 231   Node* make_unsafe_address(Node* base, Node* offset, bool is_store);
 232   // Helper for inline_unsafe_access.
 233   // Generates the guards that check whether the result of
 234   // Unsafe.getObject should be recorded in an SATB log buffer.
 235   void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
 236   bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned);
 237   bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
 238   static bool klass_needs_init_guard(Node* kls);
 239   bool inline_unsafe_allocate();
 240   bool inline_unsafe_copyMemory();
 241   bool inline_native_currentThread();
 242 #ifdef TRACE_HAVE_INTRINSICS
 243   bool inline_native_classID();
 244   bool inline_native_threadID();
 245 #endif
 246   bool inline_native_time_funcs(address method, const char* funcName);
 247   bool inline_native_isInterrupted();
 248   bool inline_native_Class_query(vmIntrinsics::ID id);
 249   bool inline_native_subtype_check();
 250 
 251   bool inline_native_newArray();


 316   Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
 317   bool inline_sha_implCompress(vmIntrinsics::ID id);
 318   bool inline_digestBase_implCompressMB(int predicate);
 319   bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
 320                                  bool long_state, address stubAddr, const char *stubName,
 321                                  Node* src_start, Node* ofs, Node* limit);
 322   Node* get_state_from_sha_object(Node *sha_object);
 323   Node* get_state_from_sha5_object(Node *sha_object);
 324   Node* inline_digestBase_implCompressMB_predicate(int predicate);
 325   bool inline_encodeISOArray();
 326   bool inline_updateCRC32();
 327   bool inline_updateBytesCRC32();
 328   bool inline_updateByteBufferCRC32();
 329   bool inline_multiplyToLen();
 330   bool inline_squareToLen();
 331   bool inline_mulAdd();
 332   bool inline_montgomeryMultiply();
 333   bool inline_montgomerySquare();
 334 
 335   bool inline_profileBoolean();
 336 
 337   Node* shenandoah_cast_not_null(Node* n) {
 338     return UseShenandoahGC ? cast_not_null(n, false) : n;
 339   }
 340 };
 341 
 342 
 343 //---------------------------make_vm_intrinsic----------------------------
 344 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
 345   vmIntrinsics::ID id = m->intrinsic_id();
 346   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
 347 
 348   ccstr disable_intr = NULL;
 349 
 350   if ((DisableIntrinsic[0] != '\0'
 351        && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) ||
 352       (method_has_option_value("DisableIntrinsic", disable_intr)
 353        && strstr(disable_intr, vmIntrinsics::name_at(id)) != NULL)) {
 354     // disabled by a user request on the command line:
 355     // example: -XX:DisableIntrinsic=_hashCode,_getClass
 356     return NULL;
 357   }
 358 
 359   if (!m->is_loaded()) {


1152 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
1153   ciKlass*    thread_klass = env()->Thread_klass();
1154   const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
1155   Node* thread = _gvn.transform(new (C) ThreadLocalNode());
1156   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
1157   Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
1158   tls_output = thread;
1159   return threadObj;
1160 }
1161 
1162 
1163 //------------------------------make_string_method_node------------------------
1164 // Helper method for String intrinsic functions. This version is called
1165 // with str1 and str2 pointing to String object nodes.
1166 //
1167 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1, Node* str2) {
1168   Node* no_ctrl = NULL;
1169 
1170   // Get start addr of string
1171   Node* str1_value   = load_String_value(no_ctrl, str1);
1172   str1_value = shenandoah_cast_not_null(str1_value);
1173   str1_value = shenandoah_read_barrier(str1_value);
1174   Node* str1_offset  = load_String_offset(no_ctrl, str1);
1175   Node* str1_start   = array_element_address(str1_value, str1_offset, T_CHAR);
1176 
1177   // Get length of string 1
1178   Node* str1_len  = load_String_length(no_ctrl, str1);
1179 
1180   Node* str2_value   = load_String_value(no_ctrl, str2);
1181   str2_value = shenandoah_cast_not_null(str2_value);
1182   str2_value = shenandoah_read_barrier(str2_value);
1183   Node* str2_offset  = load_String_offset(no_ctrl, str2);
1184   Node* str2_start   = array_element_address(str2_value, str2_offset, T_CHAR);
1185 
1186   Node* str2_len = NULL;
1187   Node* result = NULL;
1188 
1189   switch (opcode) {
1190   case Op_StrIndexOf:
1191     // Get length of string 2
1192     str2_len = load_String_length(no_ctrl, str2);
1193 
1194     result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
1195                                  str1_start, str1_len, str2_start, str2_len);
1196     break;
1197   case Op_StrComp:
1198     // Get length of string 2
1199     str2_len = load_String_length(no_ctrl, str2);
1200 
1201     result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
1202                                  str1_start, str1_len, str2_start, str2_len);


1256   }
1257   set_result(make_string_method_node(Op_StrComp, receiver, arg));
1258   return true;
1259 }
1260 
1261 //------------------------------inline_string_equals------------------------
1262 bool LibraryCallKit::inline_string_equals() {
1263   Node* receiver = null_check_receiver();
1264   // NOTE: Do not null check the argument for String.equals() because the spec
1265   // allows NULL to be passed as the argument.
1266   Node* argument = this->argument(1);
1267   if (stopped()) {
1268     return true;
1269   }
1270 
1271   // paths (plus control) merge
1272   RegionNode* region = new (C) RegionNode(5);
1273   Node* phi = new (C) PhiNode(region, TypeInt::BOOL);
1274 
1275   // does source == target string?
1276   receiver = shenandoah_write_barrier(receiver);
1277   argument = shenandoah_write_barrier(argument);
1278   Node* cmp = _gvn.transform(new (C) CmpPNode(receiver, argument));
1279   Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::eq));
1280 
1281   Node* if_eq = generate_slow_guard(bol, NULL);
1282   if (if_eq != NULL) {
1283     // receiver == argument
1284     phi->init_req(2, intcon(1));
1285     region->init_req(2, if_eq);
1286   }
1287 
1288   // get String klass for instanceOf
1289   ciInstanceKlass* klass = env()->String_klass();
1290 
1291   if (!stopped()) {
1292     Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
1293     Node* cmp  = _gvn.transform(new (C) CmpINode(inst, intcon(1)));
1294     Node* bol  = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
1295 
1296     Node* inst_false = generate_guard(bol, NULL, PROB_MIN);
1297     //instanceOf == true, fallthrough
1298 
1299     if (inst_false != NULL) {
1300       phi->init_req(3, intcon(0));
1301       region->init_req(3, inst_false);
1302     }
1303   }
1304 
1305   if (!stopped()) {
1306     const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
1307 
1308     // Properly cast the argument to String
1309     argument = _gvn.transform(new (C) CheckCastPPNode(control(), argument, string_type));
1310     // This path is taken only when argument's type is String:NotNull.
1311     argument = cast_not_null(argument, false);
1312 
1313     Node* no_ctrl = NULL;
1314 
1315     // Get start addr of receiver
1316     Node* receiver_val    = load_String_value(no_ctrl, receiver);
1317     receiver_val = shenandoah_cast_not_null(receiver_val);
1318     receiver_val = shenandoah_read_barrier(receiver_val);
1319     Node* receiver_offset = load_String_offset(no_ctrl, receiver);
1320     Node* receiver_start = array_element_address(receiver_val, receiver_offset, T_CHAR);
1321 
1322     // Get length of receiver
1323     Node* receiver_cnt  = load_String_length(no_ctrl, receiver);
1324 
1325     // Get start addr of argument
1326     Node* argument_val    = load_String_value(no_ctrl, argument);
1327     argument_val = shenandoah_cast_not_null(argument_val);
1328     argument_val = shenandoah_read_barrier(argument_val);
1329     Node* argument_offset = load_String_offset(no_ctrl, argument);
1330     Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR);
1331 
1332     // Get length of argument
1333     Node* argument_cnt  = load_String_length(no_ctrl, argument);
1334 
1335     // Check for receiver count != argument count
1336     Node* cmp = _gvn.transform(new(C) CmpINode(receiver_cnt, argument_cnt));
1337     Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::ne));
1338     Node* if_ne = generate_slow_guard(bol, NULL);
1339     if (if_ne != NULL) {
1340       phi->init_req(4, intcon(0));
1341       region->init_req(4, if_ne);
1342     }
1343 
1344     // Check for count == 0 is done by assembler code for StrEquals.
1345 
1346     if (!stopped()) {
1347       Node* equals = make_string_method_node(Op_StrEquals, receiver_start, receiver_cnt, argument_start, argument_cnt);
1348       phi->init_req(1, equals);
1349       region->init_req(1, control());
1350     }
1351   }
1352 
1353   // post merge
1354   set_control(_gvn.transform(region));
1355   record_for_igvn(region);
1356 
1357   set_result(_gvn.transform(phi));
1358   return true;
1359 }
1360 
1361 //------------------------------inline_array_equals----------------------------
1362 bool LibraryCallKit::inline_array_equals() {
1363   Node* arg1 = argument(0);
1364   Node* arg2 = argument(1);
1365 
1366   arg1 = shenandoah_read_barrier(arg1);
1367   arg2 = shenandoah_read_barrier(arg2);
1368 
1369   set_result(_gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
1370   return true;
1371 }
1372 
1373 // Java version of String.indexOf(constant string)
1374 // class StringDecl {
1375 //   StringDecl(char[] ca) {
1376 //     offset = 0;
1377 //     count = ca.length;
1378 //     value = ca;
1379 //   }
1380 //   int offset;
1381 //   int count;
1382 //   char[] value;
1383 // }
1384 //
1385 // static int string_indexOf_J(StringDecl string_object, char[] target_object,
1386 //                             int targetOffset, int cache_i, int md2) {
1387 //   int cache = cache_i;
1388 //   int sourceOffset = string_object.offset;


1424 //     }
1425 //     if ((cache & (1 << src)) == 0) {
1426 //       i += targetCountLess1;
1427 //     } // using "i += targetCount;" and an "else i++;" causes a jump to a jump.
1428 //     i++;
1429 //   }
1430 //   return -1;
1431 // }
1432 
1433 //------------------------------string_indexOf------------------------
1434 Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_array, jint targetOffset_i,
1435                                      jint cache_i, jint md2_i) {
1436 
1437   Node* no_ctrl  = NULL;
1438   float likely   = PROB_LIKELY(0.9);
1439   float unlikely = PROB_UNLIKELY(0.9);
1440 
1441   const int nargs = 0; // no arguments to push back for uncommon trap in predicate
1442 
1443   Node* source        = load_String_value(no_ctrl, string_object);
1444   source = shenandoah_cast_not_null(source);
1445   source = shenandoah_read_barrier(source);
1446   Node* sourceOffset  = load_String_offset(no_ctrl, string_object);
1447   Node* sourceCount   = load_String_length(no_ctrl, string_object);
1448 
1449   Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)));
1450   jint target_length = target_array->length();
1451   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
1452   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
1453 
1454   // String.value field is known to be @Stable.
1455   if (UseImplicitStableValues) {
1456     target = cast_array_to_stable(target, target_type);
1457   }
1458 
1459   target = shenandoah_read_barrier(target);
1460 
1461   IdealKit kit(this, false, true);
1462 #define __ kit.
1463   Node* zero             = __ ConI(0);
1464   Node* one              = __ ConI(1);
1465   Node* cache            = __ ConI(cache_i);
1466   Node* md2              = __ ConI(md2_i);
1467   Node* lastChar         = __ ConI(target_array->char_at(target_length - 1));
1468   Node* targetCount      = __ ConI(target_length);
1469   Node* targetCountLess1 = __ ConI(target_length - 1);
1470   Node* targetOffset     = __ ConI(targetOffset_i);
1471   Node* sourceEnd        = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);
1472 
1473   IdealVariable rtn(kit), i(kit), j(kit); __ declarations_done();
1474   Node* outer_loop = __ make_label(2 /* goto */);
1475   Node* return_    = __ make_label(1);
1476 
1477   __ set(rtn,__ ConI(-1));
1478   __ loop(this, nargs, i, sourceOffset, BoolTest::lt, sourceEnd); {
1479        Node* i2  = __ AddI(__ value(i), targetCountLess1);
1480        // pin to prohibit loading of "next iteration" value which may SEGV (rare)


1528       UseSSE42Intrinsics) {
1529     // Generate SSE4.2 version of indexOf
1530     // We currently only have match rules that use SSE4.2
1531 
1532     receiver = null_check(receiver);
1533     arg      = null_check(arg);
1534     if (stopped()) {
1535       return true;
1536     }
1537 
1538     ciInstanceKlass* str_klass = env()->String_klass();
1539     const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(str_klass);
1540 
1541     // Make the merge point
1542     RegionNode* result_rgn = new (C) RegionNode(4);
1543     Node*       result_phi = new (C) PhiNode(result_rgn, TypeInt::INT);
1544     Node* no_ctrl  = NULL;
1545 
1546     // Get start addr of source string
1547     Node* source = load_String_value(no_ctrl, receiver);
1548     source = shenandoah_cast_not_null(source);
1549     source = shenandoah_read_barrier(source);
1550     Node* source_offset = load_String_offset(no_ctrl, receiver);
1551     Node* source_start = array_element_address(source, source_offset, T_CHAR);
1552 
1553     // Get length of source string
1554     Node* source_cnt  = load_String_length(no_ctrl, receiver);
1555 
1556     // Get start addr of substring
1557     Node* substr = load_String_value(no_ctrl, arg);
1558     substr = shenandoah_cast_not_null(substr);
1559     substr = shenandoah_read_barrier(substr);
1560     Node* substr_offset = load_String_offset(no_ctrl, arg);
1561     Node* substr_start = array_element_address(substr, substr_offset, T_CHAR);
1562 
1563     // Get length of substring
1564     Node* substr_cnt  = load_String_length(no_ctrl, arg);
1565 
1566     // Check for substr count > string count
1567     Node* cmp = _gvn.transform(new(C) CmpINode(substr_cnt, source_cnt));
1568     Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::gt));
1569     Node* if_gt = generate_slow_guard(bol, NULL);
1570     if (if_gt != NULL) {
1571       result_phi->init_req(2, intcon(-1));
1572       result_rgn->init_req(2, if_gt);
1573     }
1574 
1575     if (!stopped()) {
1576       // Check for substr count == 0
1577       cmp = _gvn.transform(new(C) CmpINode(substr_cnt, intcon(0)));
1578       bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
1579       Node* if_zero = generate_slow_guard(bol, NULL);


1612     ciObject* v = str->field_value_by_offset(java_lang_String::value_offset_in_bytes()).as_object();
1613     ciTypeArray* pat = v->as_type_array(); // pattern (argument) character array
1614 
1615     int o;
1616     int c;
1617     if (java_lang_String::has_offset_field()) {
1618       o = str->field_value_by_offset(java_lang_String::offset_offset_in_bytes()).as_int();
1619       c = str->field_value_by_offset(java_lang_String::count_offset_in_bytes()).as_int();
1620     } else {
1621       o = 0;
1622       c = pat->length();
1623     }
1624 
1625     // Constant strings have no offset and count == length, which
1626     // simplifies the resulting code somewhat, so let's optimize for that.
1627     if (o != 0 || c != pat->length()) {
1628      return false;
1629     }
1630 
1631     receiver = null_check(receiver, T_OBJECT);
1632     receiver = shenandoah_read_barrier(receiver);
1633     // NOTE: No null check on the argument is needed since it's a constant String oop.
1634     if (stopped()) {
1635       return true;
1636     }
1637 
1638     // An empty pattern string (count == 0) always returns 0 (match at the beginning of the string).
1639     if (c == 0) {
1640       set_result(intcon(0));
1641       return true;
1642     }
1643 
1644     // Generate default indexOf
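         // (Sketch of what the constants below feed into, per the Java version
         //  further up: 'cache' gets one bit set, selected by the low 5 bits of
         //  each pattern character except the last (sizeof(cache) * BitsPerByte - 1
         //  == 31); the generated loop tests (cache & (1 << src)) == 0 to decide
         //  whether it may skip ahead by targetCountLess1 in addition to the
         //  unconditional i++.)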
1645     jchar lastChar = pat->char_at(o + (c - 1));
1646     int cache = 0;
1647     int i;
1648     for (i = 0; i < c - 1; i++) {
1649       assert(i < pat->length(), "out of range");
1650       cache |= (1 << (pat->char_at(o + i) & (sizeof(cache) * BitsPerByte - 1)));
1651     }
1652 


2378     // Base is never null => always a heap address.
2379     if (base_type->ptr() == TypePtr::NotNull) {
2380       return Type::OopPtr;
2381     }
2382     // Offset is small => always a heap address.
2383     const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2384     if (offset_type != NULL &&
2385         base_type->offset() == 0 &&     // (should always be?)
2386         offset_type->_lo >= 0 &&
2387         !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2388       return Type::OopPtr;
2389     }
2390     // Otherwise, it might either be oop+off or NULL+addr.
2391     return Type::AnyPtr;
2392   } else {
2393     // No information:
2394     return Type::AnyPtr;
2395   }
2396 }
2397 
2398 inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset, bool is_store) {
2399   int kind = classify_unsafe_addr(base, offset);
2400   if (kind == Type::RawPtr) {
2401     return basic_plus_adr(top(), base, offset);
2402   } else {
2403     if (UseShenandoahGC) {
2404       if (kind == Type::OopPtr) {
2405         // A cast without a null check should be sufficient here (we
2406         // know base is an oop with a low offset so it can't be null)
2407         // but if there's a dominating null check with both branches
2408         // taken and the cast is pushed in both branches, the cast
2409         // will become top in the null branch but the control flow
2410         // won't go away. Use a null check instead. Worst case, the
2411         // null check becomes an implicit null check with the following
2412         // barrier and is essentially free.
2413         Node* ctrl = top();
2414         base = null_check_oop(base, &ctrl, true);
2415         if (is_store) {
2416           base = shenandoah_write_barrier(base);
2417         } else {
2418           base = shenandoah_read_barrier(base);
2419         }
2420       } else if (kind == Type::AnyPtr) {
2421         if (UseShenandoahGC &&
2422             _gvn.type(base)->isa_aryptr()) {
2423           Node* ctrl = top();
2424           base = null_check_oop(base, &ctrl, true);
2425         }
2426 
2427         if (is_store) {
2428           base = shenandoah_write_barrier(base);
2429         } else {
2430           base = shenandoah_read_barrier(base);
2431         }
2432       }
2433     }
2434     return basic_plus_adr(base, offset);
2435   }
2436 }
2437 
2438 //--------------------------inline_number_methods-----------------------------
2439 // inline int     Integer.numberOfLeadingZeros(int)
2440 // inline int        Long.numberOfLeadingZeros(long)
2441 //
2442 // inline int     Integer.numberOfTrailingZeros(int)
2443 // inline int        Long.numberOfTrailingZeros(long)
2444 //
2445 // inline int     Integer.bitCount(int)
2446 // inline int        Long.bitCount(long)
2447 //
2448 // inline char  Character.reverseBytes(char)
2449 // inline short     Short.reverseBytes(short)
2450 // inline int     Integer.reverseBytes(int)
2451 // inline long       Long.reverseBytes(long)
2452 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2453   Node* arg = argument(0);


2465   case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
2466   default:  fatal_unexpected_iid(id);  break;
2467   }
2468   set_result(_gvn.transform(n));
2469   return true;
2470 }
2471 
2472 //----------------------------inline_unsafe_access----------------------------
2473 
2474 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2475 
2476 // Helper that guards and inserts a pre-barrier.
2477 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2478                                         Node* pre_val, bool need_mem_bar) {
2479   // We could be accessing the referent field of a reference object. If so, when G1
2480   // or Shenandoah is enabled, we need to log the value in the referent field in an SATB buffer.
2481   // This routine performs some compile time filters and generates suitable
2482   // runtime filters that guard the pre-barrier code.
2483   // Also add a memory barrier for a non-volatile load from the referent field
2484   // to prevent commoning of loads across safepoints.
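       // (The typical case guarded here is a load of java.lang.ref.Reference.referent
       //  via Unsafe or reflection: the loaded referent must be logged so that a
       //  concurrent SATB marking cycle does not lose it.)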
2485   if (!(UseG1GC || UseShenandoahGC) && !need_mem_bar)
2486     return;
2487 
2488   // Some compile time checks.
2489 
2490   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2491   const TypeX* otype = offset->find_intptr_t_type();
2492   if (otype != NULL && otype->is_con() &&
2493       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2494     // Constant offset but not the reference_offset so just return
2495     return;
2496   }
2497 
2498   // We only need to generate the runtime guards for instances.
2499   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2500   if (btype != NULL) {
2501     if (btype->isa_aryptr()) {
2502       // Array type so nothing to do
2503       return;
2504     }
2505 


2665 
2666   // Build address expression.  See the code in inline_unsafe_prefetch.
2667   Node* adr;
2668   Node* heap_base_oop = top();
2669   Node* offset = top();
2670   Node* val;
2671 
2672   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2673   Node* base = argument(1);  // type: oop
2674 
2675   if (!is_native_ptr) {
2676     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2677     offset = argument(2);  // type: long
2678     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2679     // to be plain byte offsets, which are also the same as those accepted
2680     // by oopDesc::field_base.
2681     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2682            "fieldOffset must be byte-scaled");
2683     // 32-bit machines ignore the high half!
2684     offset = ConvL2X(offset);
2685     adr = make_unsafe_address(base, offset, is_store);
2686     heap_base_oop = base;
2687     val = is_store ? argument(4) : NULL;
2688   } else {
2689     Node* ptr = argument(1);  // type: long
2690     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2691     adr = make_unsafe_address(NULL, ptr, is_store);
2692     val = is_store ? argument(3) : NULL;
2693   }
2694 
2695   if ((_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) && type == T_OBJECT) {
2696     return false; // off-heap oop accesses are not supported
2697   }
2698 
2699   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2700 
2701   // Try to categorize the address.
2702   Compile::AliasType* alias_type = C->alias_type(adr_type);
2703   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2704 
2705   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2706       alias_type->adr_type() == TypeAryPtr::RANGE) {
2707     return false; // not supported
2708   }
2709 
2710   bool mismatched = false;
2711   BasicType bt = alias_type->basic_type();


2722     }
2723     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2724       // Don't intrinsify mismatched object accesses
2725       return false;
2726     }
2727     mismatched = (bt != type);
2728   }
2729 
2730   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2731 
2732   // First guess at the value type.
2733   const Type *value_type = Type::get_const_basic_type(type);
2734 
2735   // We will need memory barriers unless we can determine a unique
2736   // alias category for this reference.  (Note:  If for some reason
2737   // the barriers get omitted and the unsafe reference begins to "pollute"
2738   // the alias analysis of the rest of the graph, either Compile::can_alias
2739   // or Compile::must_alias will throw a diagnostic assert.)
2740   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2741 
2742 #if INCLUDE_ALL_GCS
2743   // Work around JDK-8220714 bug. This is done for Shenandoah only, until
2744   // the shared code fix is upstreamed and properly tested there.
2745   if (UseShenandoahGC) {
2746     need_mem_bar |= is_native_ptr;
2747   }
2748 #endif
2749 
2750   // If we are reading the value of the referent field of a Reference
2751   // object (either by using Unsafe directly or through reflection)
2752   // then, if G1 or Shenandoah is enabled, we need to record the referent in an
2753   // SATB log buffer using the pre-barrier mechanism.
2754   // We also need to add a memory barrier to prevent commoning reads
2755   // from this field across a safepoint, since the GC can change its value.
2756   bool need_read_barrier = !is_native_ptr && !is_store &&
2757                            offset != top() && heap_base_oop != top();
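       // ("Read barrier" here refers to the SATB pre-barrier inserted by
       //  insert_pre_barrier() for oop loads, not to a Shenandoah read barrier.)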
2758 
2759   if (!is_store && type == T_OBJECT) {
2760     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2761     if (tjp != NULL) {
2762       value_type = tjp;
2763     }
2764   }
2765 
2766   receiver = null_check(receiver);
2767   if (stopped()) {
2768     return true;
2769   }
2770   // Heap pointers get a null-check from the interpreter,
2771   // as a courtesy.  However, this is not guaranteed by Unsafe,
2772   // and it is not possible to fully distinguish unintended nulls
2773   // from intended ones in this API.
2774 
2775   Node* load = NULL;
2776   Node* store = NULL;
2777   Node* leading_membar = NULL;
2778   if (is_volatile) {
2779     // We need to emit leading and trailing CPU membars (see below) in
2780     // addition to memory membars when is_volatile. This is a little
2781     // too strong, but avoids the need to insert per-alias-type
2782     // volatile membars (for stores; compare Parse::do_put_xxx), which
2783     // we cannot do effectively here because we probably only have a
2784     // rough approximation of type.
2785     need_mem_bar = true;
2786     // For Stores, place a memory ordering barrier now.
2787     if (is_store) {
2788       leading_membar = insert_mem_bar(Op_MemBarRelease);
2789     } else {
2790       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2791         leading_membar = insert_mem_bar(Op_MemBarVolatile);
2792       }
2793     }
2794   }
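       // (Sketch of the barrier sequences this method ends up emitting for a
       //  volatile access; the trailing barriers are inserted after the access,
       //  further down:
       //    store: MemBarRelease; MemBarCPUOrder; store; [MemBarVolatile]; MemBarCPUOrder
       //    load:  [MemBarVolatile]; MemBarCPUOrder; load; MemBarAcquire; MemBarCPUOrder
       //  The bracketed MemBarVolatile is emitted before volatile loads only when
       //  support_IRIW_for_not_multiple_copy_atomic_cpu is set, and after volatile
       //  stores only when it is not.)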
2795 
2796   // Memory barrier to prevent normal and 'unsafe' accesses from
2797   // bypassing each other.  Happens after null checks, so the
2798   // exception paths do not take memory state from the memory barrier,
2799   // so there is no problem making a strong assert about mixing users
2800   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2801   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2802   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2803 
2804   if (!is_store) {
2805     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2806     // To be valid, unsafe loads may depend on other conditions than
2807     // the one that guards them: pin the Load node
2808     load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
2809     // load value
2810     switch (type) {
2811     case T_BOOLEAN:
2812     case T_CHAR:
2813     case T_BYTE:
2814     case T_SHORT:
2815     case T_INT:
2816     case T_LONG:
2817     case T_FLOAT:
2818     case T_DOUBLE:
2819       break;
2820     case T_OBJECT:
2821       if (need_read_barrier) {
2822         insert_pre_barrier(heap_base_oop, offset, load, !(is_volatile || need_mem_bar));
2823       }
2824       break;
2825     case T_ADDRESS:
2826       // Cast to an int type.
2827       load = _gvn.transform(new (C) CastP2XNode(NULL, load));
2828       load = ConvX2UL(load);
2829       break;
2830     default:
2831       fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2832       break;
2833     }
2834     // The load node has the control of the preceding MemBarCPUOrder.  All
2835     // following nodes will have the control of the MemBarCPUOrder inserted at
2836     // the end of this method.  So, pushing the load onto the stack at a later
2837     // point is fine.
2838     set_result(load);
2839   } else {
2840     // place effect of store into memory
2841     switch (type) {
2842     case T_DOUBLE:
2843       val = dstore_rounding(val);
2844       break;
2845     case T_ADDRESS:
2846       // Repackage the long as a pointer.
2847       val = ConvL2X(val);
2848       val = _gvn.transform(new (C) CastX2PNode(val));
2849       break;
2850     }
2851 
2852     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2853     if (type == T_OBJECT ) {
2854       val = shenandoah_read_barrier_storeval(val);
2855       store = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2856     } else {
2857       store = store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2858     }
2859   }
2860 
2861   if (is_volatile) {
2862     if (!is_store) {
2863       Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
2864       mb->as_MemBar()->set_trailing_load();
2865     } else {
2866       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2867         Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
2868         MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
2869       }
2870     }
2871   }
2872 
2873   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2874 
2875   return true;
2876 }
2877 
2878 //----------------------------inline_unsafe_prefetch----------------------------
2879 
2880 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2881 #ifndef PRODUCT
2882   {
2883     ResourceMark rm;
2884     // Check the signatures.
2885     ciSignature* sig = callee()->signature();
2886 #ifdef ASSERT
2887     // Object getObject(Object base, int/long offset), etc.
2888     BasicType rtype = sig->return_type()->basic_type();


2905     null_check_receiver();
2906     if (stopped()) {
2907       return true;
2908     }
2909   }
2910 
2911   // Build address expression.  See the code in inline_unsafe_access.
2912   Node *adr;
2913   if (!is_native_ptr) {
2914     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2915     Node* base   = argument(idx + 0);  // type: oop
2916     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2917     Node* offset = argument(idx + 1);  // type: long
2918     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2919     // to be plain byte offsets, which are also the same as those accepted
2920     // by oopDesc::field_base.
2921     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2922            "fieldOffset must be byte-scaled");
2923     // 32-bit machines ignore the high half!
2924     offset = ConvL2X(offset);
2925     adr = make_unsafe_address(base, offset, false);
2926   } else {
2927     Node* ptr = argument(idx + 0);  // type: long
2928     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2929     adr = make_unsafe_address(NULL, ptr, false);
2930   }
2931 
2932   // Generate the read or write prefetch
2933   Node *prefetch;
2934   if (is_store) {
2935     prefetch = new (C) PrefetchWriteNode(i_o(), adr);
2936   } else {
2937     prefetch = new (C) PrefetchReadNode(i_o(), adr);
2938   }
2939   prefetch->init_req(0, control());
2940   set_i_o(_gvn.transform(prefetch));
2941 
2942   return true;
2943 }
2944 
2945 //----------------------------inline_unsafe_load_store----------------------------
2946 // This method serves a couple of different customers (depending on LoadStoreKind):
2947 //
2948 // LS_cmpxchg:
2949 //   public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);


3012     receiver = argument(0);  // type: oop
3013     base     = argument(1);  // type: oop
3014     offset   = argument(2);  // type: long
3015     oldval   = argument(4);  // type: oop, int, or long
3016     newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
3017   } else if (kind == LS_xadd || kind == LS_xchg){
3018     receiver = argument(0);  // type: oop
3019     base     = argument(1);  // type: oop
3020     offset   = argument(2);  // type: long
3021     oldval   = NULL;
3022     newval   = argument(4);  // type: oop, int, or long
3023   }
3024 
3025   // Build field offset expression.
3026   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
3027   // to be plain byte offsets, which are also the same as those accepted
3028   // by oopDesc::field_base.
3029   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
3030   // 32-bit machines ignore the high half of long offsets
3031   offset = ConvL2X(offset);
3032   Node* adr = make_unsafe_address(base, offset, true);
3033   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
3034 
3035   Compile::AliasType* alias_type = C->alias_type(adr_type);
3036   BasicType bt = alias_type->basic_type();
3037   if (bt != T_ILLEGAL &&
3038       ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
3039     // Don't intrinsify mismatched object accesses.
3040     return false;
3041   }
3042 
3043   // For CAS, unlike inline_unsafe_access, there seems no point in
3044   // trying to refine types. Just use the coarse types here.
3045   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
3046   const Type *value_type = Type::get_const_basic_type(type);
3047 
3048   if (kind == LS_xchg && type == T_OBJECT) {
3049     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
3050     if (tjp != NULL) {
3051       value_type = tjp;
3052     }
3053   }
3054 
3055   // Null check receiver.
3056   receiver = null_check(receiver);
3057   if (stopped()) {
3058     return true;
3059   }
3060 
3061   int alias_idx = C->get_alias_index(adr_type);
3062 
3063   // Memory-model-wise, a LoadStore acts like a little synchronized
3064   // block, so needs barriers on each side.  These don't translate
3065   // into actual barriers on most machines, but we still need the rest of
3066   // the compiler to respect ordering.
3067 
3068   Node* leading_membar = insert_mem_bar(Op_MemBarRelease);
3069   insert_mem_bar(Op_MemBarCPUOrder);
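       // (Sketch of the overall sequence around the LoadStore node: MemBarRelease;
       //  MemBarCPUOrder; LoadStore; SCMemProj; MemBarCPUOrder; MemBarAcquire.
       //  The trailing pair is inserted near the end of this method.)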
3070 
3071   // 4984716: MemBars must be inserted before this
3072   //          memory node in order to avoid a false
3073   //          dependency which will confuse the scheduler.
3074   Node *mem = memory(alias_idx);
3075 
3076   // For now, we handle only those cases that actually exist: ints,
3077   // longs, and Object. Adding others should be straightforward.
3078   Node* load_store = NULL;
3079   switch(type) {
3080   case T_INT:
3081     if (kind == LS_xadd) {
3082       load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
3083     } else if (kind == LS_xchg) {
3084       load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
3085     } else if (kind == LS_cmpxchg) {
3086       load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
3087     } else {
3088       ShouldNotReachHere();
3089     }
3090     break;
3091   case T_LONG:
3092     if (kind == LS_xadd) {
3093       load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
3094     } else if (kind == LS_xchg) {
3095       load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
3096     } else if (kind == LS_cmpxchg) {
3097       load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
3098     } else {
3099       ShouldNotReachHere();
3100     }
3101     break;
3102   case T_OBJECT:
3103     // Transformation of a value which could be a NULL pointer (CastPP #NULL)
3104     // could be delayed during Parse (for example, in adjust_map_after_if()).
3105     // Execute the transformation here to avoid barrier generation in such a case.
3106     if (_gvn.type(newval) == TypePtr::NULL_PTR)
3107       newval = _gvn.makecon(TypePtr::NULL_PTR);
3108 
3109     newval = shenandoah_read_barrier_storeval(newval);
3110 
3111     // Reference stores need a store barrier.
3112     if (kind == LS_xchg) {
3113       // If pre-barrier must execute before the oop store, old value will require do_load here.
3114       if (!can_move_pre_barrier()) {
3115         pre_barrier(true /* do_load*/,
3116                     control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
3117                     NULL /* pre_val*/,
3118                     T_OBJECT);
3119       } // Else move pre_barrier to use load_store value, see below.
3120     } else if (kind == LS_cmpxchg) {
3121       // Same as for newval above:
3122       if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
3123         oldval = _gvn.makecon(TypePtr::NULL_PTR);
3124       }
3125       // The only known value which might get overwritten is oldval.
3126       pre_barrier(false /* do_load */,
3127                   control(), NULL, NULL, max_juint, NULL, NULL,
3128                   oldval /* pre_val */,
3129                   T_OBJECT);
3130     } else {


3149       if (kind == LS_xchg) {
3150         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
3151       } else {
3152         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
3153         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
3154       }
3155     }
3156     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
3157     break;
3158   default:
3159     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
3160     break;
3161   }
3162 
3163   // SCMemProjNodes represent the memory state of a LoadStore. Their
3164   // main role is to prevent LoadStore nodes from being optimized away
3165   // when their results aren't used.
3166   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
3167   set_memory(proj, alias_idx);
3168 
3169   Node* access = load_store;
3170 
3171   if (type == T_OBJECT && kind == LS_xchg) {
3172 #ifdef _LP64
3173     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3174       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
3175     }
3176 #endif
3177     if (can_move_pre_barrier()) {
3178       // Don't need to load pre_val. The old value is returned by load_store.
3179       // The pre_barrier can execute after the xchg as long as no safepoint
3180       // gets inserted between them.
3181       pre_barrier(false /* do_load */,
3182                   control(), NULL, NULL, max_juint, NULL, NULL,
3183                   load_store /* pre_val */,
3184                   T_OBJECT);
3185     }
3186   }
3187 
3188   // Add the trailing membar surrounding the access
3189   insert_mem_bar(Op_MemBarCPUOrder);
3190   Node* mb = insert_mem_bar(Op_MemBarAcquire, access);
3191   MemBarNode::set_load_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
3192 
3193   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3194   set_result(load_store);
3195   return true;
3196 }
3197 
3198 //----------------------------inline_unsafe_ordered_store----------------------
3199 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
3200 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
3201 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
3202 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
3203   // This is another variant of inline_unsafe_access, differing in
3204   // that it always issues a store-store ("release") barrier and ensures
3205   // store-atomicity (which only matters for "long").
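       // (In barrier terms the sequence emitted below is roughly: MemBarRelease;
       //  MemBarCPUOrder; releasing store; MemBarCPUOrder.  Unlike a volatile store,
       //  no trailing MemBarVolatile or MemBarAcquire is added.)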
3206 
3207   if (callee()->is_static())  return false;  // caller must have the capability!
3208 
3209 #ifndef PRODUCT
3210   {
3211     ResourceMark rm;


3222 #endif //PRODUCT
3223 
3224   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
3225 
3226   // Get arguments:
3227   Node* receiver = argument(0);  // type: oop
3228   Node* base     = argument(1);  // type: oop
3229   Node* offset   = argument(2);  // type: long
3230   Node* val      = argument(4);  // type: oop, int, or long
3231 
3232   // Null check receiver.
3233   receiver = null_check(receiver);
3234   if (stopped()) {
3235     return true;
3236   }
3237 
3238   // Build field offset expression.
3239   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
3240   // 32-bit machines ignore the high half of long offsets
3241   offset = ConvL2X(offset);
3242   Node* adr = make_unsafe_address(base, offset, true);
3243   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
3244   const Type *value_type = Type::get_const_basic_type(type);
3245   Compile::AliasType* alias_type = C->alias_type(adr_type);
3246 
3247   insert_mem_bar(Op_MemBarRelease);
3248   insert_mem_bar(Op_MemBarCPUOrder);
3249   // Ensure that the store is atomic for longs:
3250   const bool require_atomic_access = true;
3251   Node* store;
3252   if (type == T_OBJECT) { // reference stores need a store barrier.
3253     val = shenandoah_read_barrier_storeval(val);
3254     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
3255   } else {
3256     store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
3257   }
3258   insert_mem_bar(Op_MemBarCPUOrder);
3259   return true;
3260 }
3261 
3262 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
3263   // Regardless of form, don't allow previous ld/st to move down,
3264   // then issue acquire, release, or volatile mem_bar.
3265   insert_mem_bar(Op_MemBarCPUOrder);
3266   switch(id) {
3267     case vmIntrinsics::_loadFence:
3268       insert_mem_bar(Op_LoadFence);
3269       return true;
3270     case vmIntrinsics::_storeFence:
3271       insert_mem_bar(Op_StoreFence);
3272       return true;
3273     case vmIntrinsics::_fullFence:
3274       insert_mem_bar(Op_MemBarVolatile);
3275       return true;


3413     no_int_result_path   = 1, // t == Thread.current() && !TLS._osthread._interrupted
3414     no_clear_result_path = 2, // t == Thread.current() &&  TLS._osthread._interrupted && !clear_int
3415     slow_result_path     = 3, // slow path: t.isInterrupted(clear_int)
3416     PATH_LIMIT
3417   };
3418 
3419   // Ensure that it's not possible to move the load of TLS._osthread._interrupted flag
3420   // out of the function.
3421   insert_mem_bar(Op_MemBarCPUOrder);
3422 
3423   RegionNode* result_rgn = new (C) RegionNode(PATH_LIMIT);
3424   PhiNode*    result_val = new (C) PhiNode(result_rgn, TypeInt::BOOL);
3425 
3426   RegionNode* slow_region = new (C) RegionNode(1);
3427   record_for_igvn(slow_region);
3428 
3429   // (a) Receiving thread must be the current thread.
3430   Node* rec_thr = argument(0);
3431   Node* tls_ptr = NULL;
3432   Node* cur_thr = generate_current_thread(tls_ptr);
3433   cur_thr = shenandoah_write_barrier(cur_thr);
3434   rec_thr = shenandoah_write_barrier(rec_thr);
3435   Node* cmp_thr = _gvn.transform(new (C) CmpPNode(cur_thr, rec_thr));
3436   Node* bol_thr = _gvn.transform(new (C) BoolNode(cmp_thr, BoolTest::ne));
3437 
3438   generate_slow_guard(bol_thr, slow_region);
3439 
3440   // (b) Interrupt bit on TLS must be false.
3441   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
3442   Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3443   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
3444 
3445   // Set the control input on the field _interrupted read to prevent it floating up.
3446   Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
3447   Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
3448   Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));
3449 
3450   IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
3451 
3452   // First fast path:  if (!TLS._interrupted) return false;
3453   Node* false_bit = _gvn.transform(new (C) IfFalseNode(iff_bit));
3454   result_rgn->init_req(no_int_result_path, false_bit);


3760   // Pull both arguments off the stack.
3761   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3762   args[0] = argument(0);
3763   args[1] = argument(1);
3764   Node* klasses[2];             // corresponding Klasses: superk, subk
3765   klasses[0] = klasses[1] = top();
3766 
3767   enum {
3768     // A full decision tree on {superc is prim, subc is prim}:
3769     _prim_0_path = 1,           // {P,N} => false
3770                                 // {P,P} & superc!=subc => false
3771     _prim_same_path,            // {P,P} & superc==subc => true
3772     _prim_1_path,               // {N,P} => false
3773     _ref_subtype_path,          // {N,N} & subtype check wins => true
3774     _both_ref_path,             // {N,N} & subtype check loses => false
3775     PATH_LIMIT
3776   };
3777 
3778   RegionNode* region = new (C) RegionNode(PATH_LIMIT);
3779   Node*       phi    = new (C) PhiNode(region, TypeInt::BOOL);
3780   Node*       mem_phi= new (C) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
3781   record_for_igvn(region);
3782   Node* init_mem = map()->memory();
3783 
3784   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3785   const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3786   int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
3787 
3788   // First null-check both mirrors and load each mirror's klass metaobject.
3789   int which_arg;
3790   for (which_arg = 0; which_arg <= 1; which_arg++) {
3791     Node* arg = args[which_arg];
3792     arg = null_check(arg);
3793     if (stopped())  break;
3794     args[which_arg] = arg;
3795 
3796     Node* p = basic_plus_adr(arg, class_klass_offset);
3797     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3798     klasses[which_arg] = _gvn.transform(kls);
3799   }
3800 
3801   args[0] = shenandoah_write_barrier(args[0]);
3802   args[1] = shenandoah_write_barrier(args[1]);
3803 
3804   // Having loaded both klasses, test each for null.
3805   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3806   for (which_arg = 0; which_arg <= 1; which_arg++) {
3807     Node* kls = klasses[which_arg];
3808     Node* null_ctl = top();
3809     kls = null_check_oop(kls, &null_ctl, never_see_null);
3810     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3811     region->init_req(prim_path, null_ctl);
3812     if (stopped())  break;
3813     klasses[which_arg] = kls;
3814   }
3815 
3816   if (!stopped()) {
3817     // now we have two reference types, in klasses[0..1]
3818     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3819     Node* superk = klasses[0];  // the receiver
3820     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3821     // now we have a successful reference subtype check
3822     region->set_req(_ref_subtype_path, control());
3823   }
3824 
3825   // If both operands are primitive (both klasses null), then
3826   // we must return true when they are identical primitives.
3827   // It is convenient to test this after the first null klass check.
3828   set_control(region->in(_prim_0_path)); // go back to first null check
3829   if (!stopped()) {
3830     // Since superc is primitive, make a guard for the superc==subc case.
3831     Node* cmp_eq = _gvn.transform(new (C) CmpPNode(args[0], args[1]));
3832     Node* bol_eq = _gvn.transform(new (C) BoolNode(cmp_eq, BoolTest::eq));
3833     generate_guard(bol_eq, region, PROB_FAIR);
3834     if (region->req() == PATH_LIMIT+1) {
3835       // A guard was added.  If the added guard is taken, superc==subc.
3836       region->swap_edges(PATH_LIMIT, _prim_same_path);
3837       region->del_req(PATH_LIMIT);
3838     }
3839     region->set_req(_prim_0_path, control()); // Not equal after all.
3840   }
3841 
3842   // these are the only paths that produce 'true':
3843   phi->set_req(_prim_same_path,   intcon(1));
3844   phi->set_req(_ref_subtype_path, intcon(1));
3845 
3846   // pull together the cases:
3847   assert(region->req() == PATH_LIMIT, "sane region");
3848   Node* cur_mem = reset_memory();
3849   for (uint i = 1; i < region->req(); i++) {
3850     Node* ctl = region->in(i);
3851     if (ctl == NULL || ctl == top()) {
3852       region->set_req(i, top());
3853       phi   ->set_req(i, top());
3854       mem_phi->set_req(i, top());
3855     } else {
3856       if (phi->in(i) == NULL) {
3857         phi->set_req(i, intcon(0)); // all other paths produce 'false'
3858       }
3859       mem_phi->set_req(i, (i == _prim_0_path || i == _prim_same_path) ?  cur_mem : init_mem);
3860     }
3861   }
3862 
3863   set_control(_gvn.transform(region));
3864   set_result(_gvn.transform(phi));
3865   set_all_memory(_gvn.transform(mem_phi));
3866   return true;
3867 }
3868 
3869 //---------------------generate_array_guard_common------------------------
3870 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3871                                                   bool obj_array, bool not_array) {
3872   // If obj_array/not_array==false/false:
3873   // Branch around if the given klass is in fact an array (either obj or prim).
3874   // If obj_array/not_array==false/true:
3875   // Branch around if the given klass is not an array klass of any kind.
3876   // If obj_array/not_array==true/true:
3877   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3878   // If obj_array/not_array==true/false:
3879   // Branch around if the kls is an oop array (Object[] or subtype)
3880   //
3881   // Like generate_guard, adds a new path onto the region.
3882   jint  layout_con = 0;
3883   Node* layout_val = get_layout_helper(kls, layout_con);
3884   if (layout_val == NULL) {
3885     bool query = (obj_array


4060     // Without this, new_array would throw
4061     // NegativeArraySizeException, but IllegalArgumentException is what
4062     // should be thrown.
4063     generate_negative_guard(length, bailout, &length);
4064 
4065     if (bailout->req() > 1) {
4066       PreserveJVMState pjvms(this);
4067       set_control(_gvn.transform(bailout));
4068       uncommon_trap(Deoptimization::Reason_intrinsic,
4069                     Deoptimization::Action_maybe_recompile);
4070     }
4071 
4072     if (!stopped()) {
4073       // How many elements will we copy from the original?
4074       // The answer is MinI(orig_length - start, length).
4075       Node* orig_tail = _gvn.transform(new (C) SubINode(orig_length, start));
4076       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4077 
4078       newcopy = new_array(klass_node, length, 0);  // no arguments to push
4079 
4080       original = shenandoah_read_barrier(original);
4081 
4082       // Generate a direct call to the right arraycopy function(s).
4083       // We know the copy is disjoint but we might not know if the
4084       // oop stores need checking.
4085       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
4086       // This will fail a store-check if x contains any non-nulls.
4087       bool disjoint_bases = true;
4088       // if start > orig_length then the length of the copy may be
4089       // negative.
4090       bool length_never_negative = !is_copyOfRange;
4091       generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
4092                          original, start, newcopy, intcon(0), moved,
4093                          disjoint_bases, length_never_negative);
4094     }
4095   } // original reexecute is set back here
4096 
4097   C->set_has_split_ifs(true); // Has chance for split-if optimization
4098   if (!stopped()) {
4099     set_result(newcopy);
4100   }
4101   return true;


4509 #endif //_LP64
4510 
4511 //----------------------inline_unsafe_copyMemory-------------------------
4512 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4513 bool LibraryCallKit::inline_unsafe_copyMemory() {
4514   if (callee()->is_static())  return false;  // caller must have the capability!
4515   null_check_receiver();  // null-check receiver
4516   if (stopped())  return true;
4517 
4518   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
4519 
4520   Node* src_ptr =         argument(1);   // type: oop
4521   Node* src_off = ConvL2X(argument(2));  // type: long
4522   Node* dst_ptr =         argument(4);   // type: oop
4523   Node* dst_off = ConvL2X(argument(5));  // type: long
4524   Node* size    = ConvL2X(argument(7));  // type: long
4525 
4526   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4527          "fieldOffset must be byte-scaled");
4528 
4529   Node* src = make_unsafe_address(src_ptr, src_off, false);
4530   Node* dst = make_unsafe_address(dst_ptr, dst_off, true);
4531 
4532   // Conservatively insert a memory barrier on all memory slices.
4533   // Do not let writes of the copy source or destination float below the copy.
4534   insert_mem_bar(Op_MemBarCPUOrder);
4535 
4536   // Call it.  Note that the length argument is not scaled.
4537   make_runtime_call(RC_LEAF|RC_NO_FP,
4538                     OptoRuntime::fast_arraycopy_Type(),
4539                     StubRoutines::unsafe_arraycopy(),
4540                     "unsafe_arraycopy",
4541                     TypeRawPtr::BOTTOM,
4542                     src, dst, size XTOP);
4543 
4544   // Do not let reads of the copy destination float above the copy.
4545   insert_mem_bar(Op_MemBarCPUOrder);
4546 
4547   return true;
4548 }
4549 
4550 //------------------------copy_to_clone-----------------------------------
4551 // Helper function for inline_native_clone.
4552 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
4553   assert(obj_size != NULL, "");
4554   Node* raw_obj = alloc_obj->in(1);
4555   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4556 
4557   obj = shenandoah_read_barrier(obj);
4558 
4559   AllocateNode* alloc = NULL;
4560   if (ReduceBulkZeroing) {
4561     // We will be completely responsible for initializing this object -
4562     // mark Initialize node as complete.
4563     alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4564     // The object was just allocated - there should be no stores yet!
4565     guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4566     // Mark as complete_with_arraycopy so that on AllocateNode
4567     // expansion, we know this AllocateNode is initialized by an array
4568     // copy and a StoreStore barrier exists after the array copy.
4569     alloc->initialization()->set_complete_with_arraycopy();
4570   }
4571 
4572   // Copy the fastest available way.
4573   // TODO: generate field copies for small objects instead.
4574   Node* src  = obj;
4575   Node* dest = alloc_obj;
4576   Node* size = _gvn.transform(obj_size);
4577 
4578   // Exclude the header but include the array length, to copy by 8-byte words.


4591     } else {
4592       // Include the klass to copy by 8-byte words.
4593       base_off = instanceOopDesc::klass_offset_in_bytes();
4594     }
4595     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4596   }
4597   src  = basic_plus_adr(src,  base_off);
4598   dest = basic_plus_adr(dest, base_off);
4599 
4600   // Compute the length also, if needed:
4601   Node* countx = size;
4602   countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
4603   countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
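       // That is, countx = (obj_size - base_off) >> LogBytesPerLong, the number of
       // 8-byte words the raw copy below will move.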
4604 
4605   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4606   bool disjoint_bases = true;
4607   generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4608                                src, NULL, dest, NULL, countx,
4609                                /*dest_uninitialized*/true);
4610 
4611   if (UseShenandoahGC && ShenandoahCloneBarrier) {
4612     // Make sure that references in the cloned object are updated for Shenandoah.
4613     make_runtime_call(RC_LEAF|RC_NO_FP,
4614                       OptoRuntime::shenandoah_clone_barrier_Type(),
4615                       CAST_FROM_FN_PTR(address, SharedRuntime::shenandoah_clone_barrier),
4616                       "shenandoah_clone_barrier", TypePtr::BOTTOM,
4617                       alloc_obj);
4618   }
4619 
4620   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4621   if (card_mark) {
4622     assert(!is_array, "");
4623     // Put in store barrier for any and all oops we are sticking
4624     // into this object.  (We could avoid this if we could prove
4625     // that the object type contains no oop fields at all.)
4626     Node* no_particular_value = NULL;
4627     Node* no_particular_field = NULL;
4628     int raw_adr_idx = Compile::AliasIdxRaw;
4629     post_barrier(control(),
4630                  memory(raw_adr_type),
4631                  alloc_obj,
4632                  no_particular_field,
4633                  raw_adr_idx,
4634                  no_particular_value,
4635                  T_OBJECT,
4636                  false);
4637   }
4638 
4639   // Do not let reads from the cloned object float above the arraycopy.


4708 
4709     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4710     int raw_adr_idx = Compile::AliasIdxRaw;
4711 
4712     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4713     if (array_ctl != NULL) {
4714       // It's an array.
4715       PreserveJVMState pjvms(this);
4716       set_control(array_ctl);
4717       Node* obj_length = load_array_length(obj);
4718       Node* obj_size  = NULL;
4719       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push
4720 
4721       if (!use_ReduceInitialCardMarks()) {
4722         // If it is an oop array, it requires very special treatment,
4723         // because card marking is required on each card of the array.
4724         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4725         if (is_obja != NULL) {
4726           PreserveJVMState pjvms2(this);
4727           set_control(is_obja);
4728 
4729           obj = shenandoah_read_barrier(obj);
4730 
4731           // Generate a direct call to the right arraycopy function(s).
4732           bool disjoint_bases = true;
4733           bool length_never_negative = true;
4734           generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
4735                              obj, intcon(0), alloc_obj, intcon(0),
4736                              obj_length,
4737                              disjoint_bases, length_never_negative);
4738           result_reg->init_req(_objArray_path, control());
4739           result_val->init_req(_objArray_path, alloc_obj);
4740           result_i_o ->set_req(_objArray_path, i_o());
4741           result_mem ->set_req(_objArray_path, reset_memory());
4742         }
4743       }
4744       // Otherwise, there are no card marks to worry about.
4745       // (We can dispense with card marks if we know the allocation
4746       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4747       //  causes the non-eden paths to take compensating steps to
4748       //  simulate a fresh allocation, so that no further
4749       //  card marks are required in compiled code to initialize
4750       //  the object.)


4928         src_type  = _gvn.type(src);
4929         top_src  = src_type->isa_aryptr();
4930         has_src = (top_src != NULL && top_src->klass() != NULL);
4931         src_spec = true;
4932       }
4933       if (!has_dest) {
4934         dest = maybe_cast_profiled_obj(dest, dest_k);
4935         dest_type  = _gvn.type(dest);
4936         top_dest  = dest_type->isa_aryptr();
4937         has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4938         dest_spec = true;
4939       }
4940     }
4941   }
4942 
4943   if (!has_src || !has_dest) {
4944     // Conservatively insert a memory barrier on all memory slices.
4945     // Do not let writes into the source float below the arraycopy.
4946     insert_mem_bar(Op_MemBarCPUOrder);
4947 
4948     src = shenandoah_read_barrier(src);
4949     dest = shenandoah_write_barrier(dest);
4950 
4951     // Call StubRoutines::generic_arraycopy stub.
4952     generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
4953                        src, src_offset, dest, dest_offset, length);
4954 
4955     // Do not let reads from the destination float above the arraycopy.
4956     // Since we cannot type the arrays, we don't know which slices
4957     // might be affected.  We could restrict this barrier only to those
4958     // memory slices which pertain to array elements--but don't bother.
4959     if (!InsertMemBarAfterArraycopy)
4960       // (If InsertMemBarAfterArraycopy, there is already one in place.)
4961       insert_mem_bar(Op_MemBarCPUOrder);
4962     return true;
4963   }
4964 
4965   // (2) src and dest arrays must have elements of the same BasicType
4966   // Figure out the size and type of the elements we will be copying.
4967   BasicType src_elem  =  top_src->klass()->as_array_klass()->element_type()->basic_type();
4968   BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4969   if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
4970   if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
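       // (An array-of-arrays element is itself just an oop, so T_ARRAY elements
       //  are treated as T_OBJECT for the purposes of the copy.)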
4971 
4972   if (src_elem != dest_elem || dest_elem == T_VOID) {
4973     // The component types are not the same or are not recognized.  Punt.
4974     // (But, avoid the native method wrapper to JVM_ArrayCopy.)
4975 
4976     src = shenandoah_read_barrier(src);
4977     dest = shenandoah_write_barrier(dest);
4978 
4979     generate_slow_arraycopy(TypePtr::BOTTOM,
4980                             src, src_offset, dest, dest_offset, length,
4981                             /*dest_uninitialized*/false);
4982     return true;
4983   }
4984 
4985   if (src_elem == T_OBJECT) {
4986     // If both arrays are object arrays then having the exact types
4987     // for both will remove the need for a subtype check at runtime
4988     // before the call and may make it possible to pick a faster copy
4989     // routine (without a subtype check on every element)
4990     // Do we have the exact type of src?
4991     bool could_have_src = src_spec;
4992     // Do we have the exact type of dest?
4993     bool could_have_dest = dest_spec;
4994     ciKlass* src_k = top_src->klass();
4995     ciKlass* dest_k = top_dest->klass();
4996     if (!src_spec) {
4997       src_k = src_type->speculative_type();
4998       if (src_k != NULL && src_k->is_array_klass()) {


5024   // (3) src and dest must not be null.
5025   // (4) src_offset must not be negative.
5026   // (5) dest_offset must not be negative.
5027   // (6) length must not be negative.
5028   // (7) src_offset + length must not exceed length of src.
5029   // (8) dest_offset + length must not exceed length of dest.
5030   // (9) each element of an oop array must be assignable
5031 
5032   RegionNode* slow_region = new (C) RegionNode(1);
5033   record_for_igvn(slow_region);
5034 
5035   // (3) operands must not be null
5036   // We currently perform our null checks with the null_check routine.
5037   // This means that the null exceptions will be reported in the caller
5038   // rather than (correctly) reported inside of the native arraycopy call.
5039   // This should be corrected, given time.  We do our null check with the
5040   // stack pointer restored.
5041   src  = null_check(src,  T_ARRAY);
5042   dest = null_check(dest, T_ARRAY);
5043 
5044   src = shenandoah_read_barrier(src);
5045   dest = shenandoah_write_barrier(dest);
5046 
5047   // (4) src_offset must not be negative.
5048   generate_negative_guard(src_offset, slow_region);
5049 
5050   // (5) dest_offset must not be negative.
5051   generate_negative_guard(dest_offset, slow_region);
5052 
5053   // (6) length must not be negative (moved to generate_arraycopy()).
5054   // generate_negative_guard(length, slow_region);
5055 
5056   // (7) src_offset + length must not exceed length of src.
5057   generate_limit_guard(src_offset, length,
5058                        load_array_length(src),
5059                        slow_region);
5060 
5061   // (8) dest_offset + length must not exceed length of dest.
5062   generate_limit_guard(dest_offset, length,
5063                        load_array_length(dest),
5064                        slow_region);
5065 
5066   // (9) each element of an oop array must be assignable


5343       PreserveJVMState pjvms(this);
5344       set_control(not_subtype_ctrl);
5345       // (At this point we can assume disjoint_bases, since types differ.)
5346       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
5347       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
5348       Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
5349       Node* dest_elem_klass = _gvn.transform(n1);
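           // dest_elem_klass is the element klass of the destination array;
           // generate_checkcast_arraycopy uses it to verify that every copied
           // oop is assignable to the destination's element type.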
5350       Node* cv = generate_checkcast_arraycopy(adr_type,
5351                                               dest_elem_klass,
5352                                               src, src_offset, dest, dest_offset,
5353                                               ConvI2X(copy_length), dest_uninitialized);
5354       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
5355       checked_control = control();
5356       checked_i_o     = i_o();
5357       checked_mem     = memory(adr_type);
5358       checked_value   = cv;
5359     }
5360     // At this point we know we do not need type checks on oop stores.
5361 
5362     // Let's see if we need card marks:
5363     if (alloc != NULL && use_ReduceInitialCardMarks() && ! UseShenandoahGC) {
5364       // If we do not need card marks, copy using the jint or jlong stub.
5365       copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
5366       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
5367              "sizes agree");
5368     }
5369   }
5370 
5371   if (!stopped()) {
5372     // Generate the fast path, if possible.
5373     PreserveJVMState pjvms(this);
5374     generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
5375                                  src, src_offset, dest, dest_offset,
5376                                  ConvI2X(copy_length), dest_uninitialized);
5377 
5378     // Present the results of the fast call.
5379     result_region->init_req(fast_path, control());
5380     result_i_o   ->init_req(fast_path, i_o());
5381     result_memory->init_req(fast_path, memory(adr_type));
5382   }
5383 


5495     // a subsequent store that would make this object accessible by
5496     // other threads.
5497     // Record what AllocateNode this StoreStore protects so that
5498     // escape analysis can go from the MemBarStoreStoreNode to the
5499     // AllocateNode and eliminate the MemBarStoreStoreNode if possible
5500     // based on the escape status of the AllocateNode.
5501     insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
5502   } else if (InsertMemBarAfterArraycopy)
5503     insert_mem_bar(Op_MemBarCPUOrder);
5504 }
5505 
5506 
5507 // Helper function which determines if an arraycopy immediately follows
5508 // an allocation, with no intervening tests or other escapes for the object.
5509 AllocateArrayNode*
5510 LibraryCallKit::tightly_coupled_allocation(Node* ptr,
5511                                            RegionNode* slow_region) {
5512   if (stopped())             return NULL;  // no fast path
5513   if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
5514 
5515   ptr = ShenandoahBarrierNode::skip_through_barrier(ptr);
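       // Strip any Shenandoah barrier wrapped around ptr so that the raw
       // AllocateArrayNode underneath can be recognized below.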
5516 
5517   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
5518   if (alloc == NULL)  return NULL;
5519 
5520   Node* rawmem = memory(Compile::AliasIdxRaw);
5521   // Is the allocation's memory state untouched?
5522   if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
5523     // Bail out if there have been raw-memory effects since the allocation.
5524     // (Example:  There might have been a call or safepoint.)
5525     return NULL;
5526   }
5527   rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
5528   if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
5529     return NULL;
5530   }
5531 
5532   // There must be no unexpected observers of this allocation.
5533   for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
5534     Node* obs = ptr->fast_out(i);
5535     if (obs != this->map()) {
5536       return NULL;


5875                           disjoint_bases, copyfunc_name, dest_uninitialized);
5876 
5877   // Call it.  Note that the count_ix value is not scaled to a byte-size.
5878   make_runtime_call(RC_LEAF|RC_NO_FP,
5879                     OptoRuntime::fast_arraycopy_Type(),
5880                     copyfunc_addr, copyfunc_name, adr_type,
5881                     src_start, dest_start, copy_length XTOP);
5882 }
5883 
5884 //-------------inline_encodeISOArray-----------------------------------
5885 // encode char[] to byte[] in ISO_8859_1
5886 bool LibraryCallKit::inline_encodeISOArray() {
5887   assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
5888   // no receiver since it is static method
5889   Node *src         = argument(0);
5890   Node *src_offset  = argument(1);
5891   Node *dst         = argument(2);
5892   Node *dst_offset  = argument(3);
5893   Node *length      = argument(4);
5894 
5895   src = shenandoah_cast_not_null(src);
5896   dst = shenandoah_cast_not_null(dst);
5897 
5898   src = shenandoah_read_barrier(src);
5899   dst = shenandoah_write_barrier(dst);
5900 
5901   const Type* src_type = src->Value(&_gvn);
5902   const Type* dst_type = dst->Value(&_gvn);
5903   const TypeAryPtr* top_src = src_type->isa_aryptr();
5904   const TypeAryPtr* top_dest = dst_type->isa_aryptr();
5905   if (top_src  == NULL || top_src->klass()  == NULL ||
5906       top_dest == NULL || top_dest->klass() == NULL) {
5907     // failed array check
5908     return false;
5909   }
5910 
5911   // Figure out the size and type of the elements we will be copying.
5912   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5913   BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5914   if (src_elem != T_CHAR || dst_elem != T_BYTE) {
5915     return false;
5916   }
5917   Node* src_start = array_element_address(src, src_offset, src_elem);
5918   Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
5919   // 'src_start' points to src array + scaled offset
5920   // 'dst_start' points to dst array + scaled offset


5930 
5931 //-------------inline_multiplyToLen-----------------------------------
5932 bool LibraryCallKit::inline_multiplyToLen() {
5933   assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
5934 
5935   address stubAddr = StubRoutines::multiplyToLen();
5936   if (stubAddr == NULL) {
5937     return false; // Intrinsic's stub is not implemented on this platform
5938   }
5939   const char* stubName = "multiplyToLen";
5940 
5941   assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
5942 
5943   // no receiver because it is a static method
5944   Node* x    = argument(0);
5945   Node* xlen = argument(1);
5946   Node* y    = argument(2);
5947   Node* ylen = argument(3);
5948   Node* z    = argument(4);
5949 
5950   x = shenandoah_cast_not_null(x);
5951   x = shenandoah_read_barrier(x);
5952   y = shenandoah_cast_not_null(y);
5953   y = shenandoah_read_barrier(y);
5954   z = shenandoah_write_barrier(z);
5955 
5956   const Type* x_type = x->Value(&_gvn);
5957   const Type* y_type = y->Value(&_gvn);
5958   const TypeAryPtr* top_x = x_type->isa_aryptr();
5959   const TypeAryPtr* top_y = y_type->isa_aryptr();
5960   if (top_x  == NULL || top_x->klass()  == NULL ||
5961       top_y == NULL || top_y->klass() == NULL) {
5962     // failed array check
5963     return false;
5964   }
5965 
5966   BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5967   BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5968   if (x_elem != T_INT || y_elem != T_INT) {
5969     return false;
5970   }
5971 
5972   // Set the original stack and the reexecute bit for the interpreter to reexecute
5973   // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
5974   // on the return from z array allocation in runtime.
5975   { PreserveReexecuteState preexecs(this);


5981     // 'y_start' points to y array + scaled ylen
5982 
5983     // Allocate the result array
5984     Node* zlen = _gvn.transform(new(C) AddINode(xlen, ylen));
5985     ciKlass* klass = ciTypeArrayKlass::make(T_INT);
5986     Node* klass_node = makecon(TypeKlassPtr::make(klass));
5987 
5988     IdealKit ideal(this);
5989 
5990 #define __ ideal.
5991      Node* one = __ ConI(1);
5992      Node* zero = __ ConI(0);
5993      IdealVariable need_alloc(ideal), z_alloc(ideal);  __ declarations_done();
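          // need_alloc counts whether a fresh result array is required
          // (z is null, or shorter than xlen + ylen); z_alloc tracks the
          // array that will finally receive the product.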
5994      __ set(need_alloc, zero);
5995      __ set(z_alloc, z);
5996      __ if_then(z, BoolTest::eq, null()); {
5997        __ increment (need_alloc, one);
5998      } __ else_(); {
5999        // Update graphKit memory and control from IdealKit.
6000        sync_kit(ideal);
6001        Node* zlen_arg = NULL;
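            // With Shenandoah, load the length through a control-pinned
            // not-null CastPP of z; presumably this keeps the length load
            // (and the barrier later expanded for it) from floating above
            // the z == null check.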
6002        if (UseShenandoahGC) {
6003          Node *cast = new (C) CastPPNode(z, TypePtr::NOTNULL);
6004          cast->init_req(0, control());
6005          _gvn.set_type(cast, cast->bottom_type());
6006          C->record_for_igvn(cast);
6007          zlen_arg = load_array_length(cast);
6008        } else {
6009          zlen_arg = load_array_length(z);
6010        }
6011        // Update IdealKit memory and control from graphKit.
6012        __ sync_kit(this);
6013        __ if_then(zlen_arg, BoolTest::lt, zlen); {
6014          __ increment (need_alloc, one);
6015        } __ end_if();
6016      } __ end_if();
6017 
6018      __ if_then(__ value(need_alloc), BoolTest::ne, zero); {
6019        // Update graphKit memory and control from IdealKit.
6020        sync_kit(ideal);
6021        Node * narr = new_array(klass_node, zlen, 1);
6022        // Update IdealKit memory and control from graphKit.
6023        __ sync_kit(this);
6024        __ set(z_alloc, narr);
6025      } __ end_if();
6026 
6027      sync_kit(ideal);
6028      z = __ value(z_alloc);
6029      // Can't use TypeAryPtr::INTS which uses Bottom offset.
6030      _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));


6045   return true;
6046 }
6047 
6048 //-------------inline_squareToLen------------------------------------
6049 bool LibraryCallKit::inline_squareToLen() {
6050   assert(UseSquareToLenIntrinsic, "not implemented on this platform");
6051 
6052   address stubAddr = StubRoutines::squareToLen();
6053   if (stubAddr == NULL) {
6054     return false; // Intrinsic's stub is not implemented on this platform
6055   }
6056   const char* stubName = "squareToLen";
6057 
6058   assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
6059 
6060   Node* x    = argument(0);
6061   Node* len  = argument(1);
6062   Node* z    = argument(2);
6063   Node* zlen = argument(3);
6064 
6065   x = shenandoah_cast_not_null(x);
6066   x = shenandoah_read_barrier(x);
6067   z = shenandoah_cast_not_null(z);
6068   z = shenandoah_write_barrier(z);
6069 
6070   const Type* x_type = x->Value(&_gvn);
6071   const Type* z_type = z->Value(&_gvn);
6072   const TypeAryPtr* top_x = x_type->isa_aryptr();
6073   const TypeAryPtr* top_z = z_type->isa_aryptr();
6074   if (top_x  == NULL || top_x->klass()  == NULL ||
6075       top_z  == NULL || top_z->klass()  == NULL) {
6076     // failed array check
6077     return false;
6078   }
6079 
6080   BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6081   BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6082   if (x_elem != T_INT || z_elem != T_INT) {
6083     return false;
6084   }
6085 
6086 
6087   Node* x_start = array_element_address(x, intcon(0), x_elem);
6088   Node* z_start = array_element_address(z, intcon(0), z_elem);
6089 


6097 }
6098 
6099 //-------------inline_mulAdd------------------------------------------
6100 bool LibraryCallKit::inline_mulAdd() {
6101   assert(UseMulAddIntrinsic, "not implemented on this platform");
6102 
6103   address stubAddr = StubRoutines::mulAdd();
6104   if (stubAddr == NULL) {
6105     return false; // Intrinsic's stub is not implemented on this platform
6106   }
6107   const char* stubName = "mulAdd";
6108 
6109   assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
6110 
6111   Node* out      = argument(0);
6112   Node* in       = argument(1);
6113   Node* offset   = argument(2);
6114   Node* len      = argument(3);
6115   Node* k        = argument(4);
6116 
6117   in = shenandoah_read_barrier(in);
6118   out = shenandoah_cast_not_null(out);
6119   out = shenandoah_write_barrier(out);
6120 
6121   const Type* out_type = out->Value(&_gvn);
6122   const Type* in_type = in->Value(&_gvn);
6123   const TypeAryPtr* top_out = out_type->isa_aryptr();
6124   const TypeAryPtr* top_in = in_type->isa_aryptr();
6125   if (top_out  == NULL || top_out->klass()  == NULL ||
6126       top_in == NULL || top_in->klass() == NULL) {
6127     // failed array check
6128     return false;
6129   }
6130 
6131   BasicType out_elem = out_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6132   BasicType in_elem = in_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6133   if (out_elem != T_INT || in_elem != T_INT) {
6134     return false;
6135   }
6136 
6137   Node* outlen = load_array_length(out);
6138   Node* new_offset = _gvn.transform(new (C) SubINode(outlen, offset));
6139   Node* out_start = array_element_address(out, intcon(0), out_elem);
6140   Node* in_start = array_element_address(in, intcon(0), in_elem);


6150 
6151 //-------------inline_montgomeryMultiply-----------------------------------
6152 bool LibraryCallKit::inline_montgomeryMultiply() {
6153   address stubAddr = StubRoutines::montgomeryMultiply();
6154   if (stubAddr == NULL) {
6155     return false; // Intrinsic's stub is not implemented on this platform
6156   }
6157 
6158   assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
6159   const char* stubName = "montgomery_multiply";
6160 
6161   assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
6162 
6163   Node* a    = argument(0);
6164   Node* b    = argument(1);
6165   Node* n    = argument(2);
6166   Node* len  = argument(3);
6167   Node* inv  = argument(4);
6168   Node* m    = argument(6);
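       // 'inv' is a long and takes two argument slots (4 and 5), so the
       // product array is argument 6.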
6169 
6170   a = shenandoah_read_barrier(a);
6171   b = shenandoah_read_barrier(b);
6172   n = shenandoah_read_barrier(n);
6173   m = shenandoah_write_barrier(m);
6174 
6175   const Type* a_type = a->Value(&_gvn);
6176   const TypeAryPtr* top_a = a_type->isa_aryptr();
6177   const Type* b_type = b->Value(&_gvn);
6178   const TypeAryPtr* top_b = b_type->isa_aryptr();
6179   const Type* n_type = n->Value(&_gvn);
6180   const TypeAryPtr* top_n = n_type->isa_aryptr();
6181   const Type* m_type = m->Value(&_gvn);
6182   const TypeAryPtr* top_m = m_type->isa_aryptr();
6183   if (top_a  == NULL || top_a->klass()  == NULL ||
6184       top_b == NULL || top_b->klass()  == NULL ||
6185       top_n == NULL || top_n->klass()  == NULL ||
6186       top_m == NULL || top_m->klass()  == NULL) {
6187     // failed array check
6188     return false;
6189   }
6190 
6191   BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6192   BasicType b_elem = b_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6193   BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6194   BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();


6224   return true;
6225 }
6226 
6227 bool LibraryCallKit::inline_montgomerySquare() {
6228   address stubAddr = StubRoutines::montgomerySquare();
6229   if (stubAddr == NULL) {
6230     return false; // Intrinsic's stub is not implemented on this platform
6231   }
6232 
6233   assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
6234   const char* stubName = "montgomery_square";
6235 
6236   assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");
6237 
6238   Node* a    = argument(0);
6239   Node* n    = argument(1);
6240   Node* len  = argument(2);
6241   Node* inv  = argument(3);
6242   Node* m    = argument(5);
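       // 'inv' is again a long (slots 3 and 4), so m is argument 5.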
6243 
6244   a = shenandoah_read_barrier(a);
6245   n = shenandoah_read_barrier(n);
6246   m = shenandoah_write_barrier(m);
6247 
6248   const Type* a_type = a->Value(&_gvn);
6249   const TypeAryPtr* top_a = a_type->isa_aryptr();
6250   const Type* n_type = n->Value(&_gvn);
6251   const TypeAryPtr* top_n = n_type->isa_aryptr();
6252   const Type* m_type = m->Value(&_gvn);
6253   const TypeAryPtr* top_m = m_type->isa_aryptr();
6254   if (top_a  == NULL || top_a->klass()  == NULL ||
6255       top_n == NULL || top_n->klass()  == NULL ||
6256       top_m == NULL || top_m->klass()  == NULL) {
6257     // failed array check
6258     return false;
6259   }
6260 
6261   BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6262   BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6263   BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6264   if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
6265     return false;
6266   }
6267 


6339   // no receiver since it is static method
6340   Node* crc     = argument(0); // type: int
6341   Node* src     = argument(1); // type: oop
6342   Node* offset  = argument(2); // type: int
6343   Node* length  = argument(3); // type: int
6344 
6345   const Type* src_type = src->Value(&_gvn);
6346   const TypeAryPtr* top_src = src_type->isa_aryptr();
6347   if (top_src  == NULL || top_src->klass()  == NULL) {
6348     // failed array check
6349     return false;
6350   }
6351 
6352   // Figure out the size and type of the elements we will be copying.
6353   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6354   if (src_elem != T_BYTE) {
6355     return false;
6356   }
6357 
6358   // 'src_start' points to src array + scaled offset
6359   src = shenandoah_cast_not_null(src);
6360   src = shenandoah_read_barrier(src);
6362   Node* src_start = array_element_address(src, offset, src_elem);
6363 
6364   // We assume that range check is done by caller.
6365   // TODO: generate range check (offset+length < src.length) in debug VM.
6366 
6367   // Call the stub.
6368   address stubAddr = StubRoutines::updateBytesCRC32();
6369   const char *stubName = "updateBytesCRC32";
6370   Node* call;
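       // On platforms where the C calling convention passes ints as longs,
       // each int argument (crc, length) occupies two slots and XTOP
       // supplies the unused high half.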
6371   if (CCallingConventionRequiresIntsAsLongs) {
6372    call =  make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
6373                              stubAddr, stubName, TypePtr::BOTTOM,
6374                              crc XTOP, src_start, length XTOP);
6375   } else {
6376     call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
6377                              stubAddr, stubName, TypePtr::BOTTOM,
6378                              crc, src_start, length);
6379   }
6380   Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
6381   set_result(result);


6451 
6452   set_result(result);
6453   return true;
6454 }
6455 
6456 
6457 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6458                                               bool is_exact=true, bool is_static=false) {
6459 
6460   const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
6461   assert(tinst != NULL, "obj is null");
6462   assert(tinst->klass()->is_loaded(), "obj is not loaded");
6463   assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
6464 
6465   ciField* field = tinst->klass()->as_instance_klass()->get_field_by_name(ciSymbol::make(fieldName),
6466                                                                           ciSymbol::make(fieldTypeString),
6467                                                                           is_static);
6468   if (field == NULL) return (Node *) NULL;
6469   assert (field != NULL, "undefined field");
6470 
6471   if ((ShenandoahOptimizeStaticFinals   && field->is_static()  && field->is_final()) ||
6472       (ShenandoahOptimizeInstanceFinals && !field->is_static() && field->is_final()) ||
6473       (ShenandoahOptimizeStableFinals   && field->is_stable())) {
6474     // Skip the barrier for special fields
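         // Final and stable fields never change after initialization, so the
         // from-space copy still holds the correct value and the read
         // barrier can be elided.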
6475   } else {
6476     fromObj = shenandoah_read_barrier(fromObj);
6477   }
6478 
6479   // Next code  copied from Parse::do_get_xxx():
6480 
6481   // Compute address and memory type.
6482   int offset  = field->offset_in_bytes();
6483   bool is_vol = field->is_volatile();
6484   ciType* field_klass = field->type();
6485   assert(field_klass->is_loaded(), "should be loaded");
6486   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6487   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6488   BasicType bt = field->layout_type();
6489 
6490   // Build the resultant type of the load
6491   const Type *type;
6492   if (bt == T_OBJECT) {
6493     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6494   } else {
6495     type = Type::get_const_basic_type(bt);
6496   }
6497 
6498   Node* leading_membar = NULL;
6499   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6500     leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6501   }
6502   // Build the load.
6503   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6504   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
6505   // If reference is volatile, prevent following memory ops from
6506   // floating up past the volatile read.  Also prevents commoning
6507   // another volatile read.
6508   if (is_vol) {
6509     // Memory barrier includes bogus read of value to force load BEFORE membar
6510     Node* mb = insert_mem_bar(Op_MemBarAcquire, loadedField);
6511     mb->as_MemBar()->set_trailing_load();
6512   }
6513   return loadedField;
6514 }
6515 
6516 
6517 //------------------------------inline_aescrypt_Block-----------------------
6518 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6519   address stubAddr = NULL;
6520   const char *stubName;
6521   assert(UseAES, "need AES instruction support");
6522 
6523   switch(id) {
6524   case vmIntrinsics::_aescrypt_encryptBlock:
6525     stubAddr = StubRoutines::aescrypt_encryptBlock();
6526     stubName = "aescrypt_encryptBlock";
6527     break;
6528   case vmIntrinsics::_aescrypt_decryptBlock:
6529     stubAddr = StubRoutines::aescrypt_decryptBlock();
6530     stubName = "aescrypt_decryptBlock";
6531     break;
6532   }
6533   if (stubAddr == NULL) return false;
6534 
6535   Node* aescrypt_object = argument(0);
6536   Node* src             = argument(1);
6537   Node* src_offset      = argument(2);
6538   Node* dest            = argument(3);
6539   Node* dest_offset     = argument(4);
6540 
6541   // Resolve src and dest arrays for ShenandoahGC.
6542   src = shenandoah_cast_not_null(src);
6543   src = shenandoah_read_barrier(src);
6544   dest = shenandoah_cast_not_null(dest);
6545   dest = shenandoah_write_barrier(dest);
6546 
6547   // (1) src and dest are arrays.
6548   const Type* src_type = src->Value(&_gvn);
6549   const Type* dest_type = dest->Value(&_gvn);
6550   const TypeAryPtr* top_src = src_type->isa_aryptr();
6551   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
6552   assert (top_src  != NULL && top_src->klass()  != NULL &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
6553 
6554   // for the quick and dirty code we will skip all the checks.
6555   // we are just trying to get the call to be generated.
6556   Node* src_start  = src;
6557   Node* dest_start = dest;
6558   if (src_offset != NULL || dest_offset != NULL) {
6559     assert(src_offset != NULL && dest_offset != NULL, "");
6560     src_start  = array_element_address(src,  src_offset,  T_BYTE);
6561     dest_start = array_element_address(dest, dest_offset, T_BYTE);
6562   }
6563 
6564   // now need to get the start of its expanded key array
6565   // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
6566   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);


6595 
6596   switch(id) {
6597   case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
6598     stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
6599     stubName = "cipherBlockChaining_encryptAESCrypt";
6600     break;
6601   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
6602     stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
6603     stubName = "cipherBlockChaining_decryptAESCrypt";
6604     break;
6605   }
6606   if (stubAddr == NULL) return false;
6607 
6608   Node* cipherBlockChaining_object = argument(0);
6609   Node* src                        = argument(1);
6610   Node* src_offset                 = argument(2);
6611   Node* len                        = argument(3);
6612   Node* dest                       = argument(4);
6613   Node* dest_offset                = argument(5);
6614 
6615   // inline_cipherBlockChaining_AESCrypt_predicate() has its own
6616   // barrier. This one should optimize away.
6617   src = shenandoah_cast_not_null(src);
6618   dest = shenandoah_cast_not_null(dest);
6619   src = shenandoah_read_barrier(src);
6620   dest = shenandoah_write_barrier(dest);
6621 
6622   // (1) src and dest are arrays.
6623   const Type* src_type = src->Value(&_gvn);
6624   const Type* dest_type = dest->Value(&_gvn);
6625   const TypeAryPtr* top_src = src_type->isa_aryptr();
6626   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
6627   assert (top_src  != NULL && top_src->klass()  != NULL
6628           &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
6629 
6630   // checks are the responsibility of the caller
6631   Node* src_start  = src;
6632   Node* dest_start = dest;
6633   if (src_offset != NULL || dest_offset != NULL) {
6634     assert(src_offset != NULL && dest_offset != NULL, "");
6635     src_start  = array_element_address(src,  src_offset,  T_BYTE);
6636     dest_start = array_element_address(dest, dest_offset, T_BYTE);
6637   }
6638 
6639   // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
6640   // (because of the predicated logic executed earlier).
6641   // so we cast it here safely.


6646 
6647   // cast it to what we know it will be at runtime
6648   const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
6649   assert(tinst != NULL, "CBC obj is null");
6650   assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
6651   ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6652   assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
6653 
6654   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6655   const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
6656   const TypeOopPtr* xtype = aklass->as_instance_type();
6657   Node* aescrypt_object = new(C) CheckCastPPNode(control(), embeddedCipherObj, xtype);
6658   aescrypt_object = _gvn.transform(aescrypt_object);
6659 
6660   // we need to get the start of the aescrypt_object's expanded key array
6661   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6662   if (k_start == NULL) return false;
6663 
6664   // similarly, get the start address of the r vector
6665   Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
6666 
6667   if (objRvec == NULL) return false;
6668 
6669   objRvec = shenandoah_write_barrier(objRvec);
6670   Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
6671 
6672   Node* cbcCrypt;
6673   if (Matcher::pass_original_key_for_aes()) {
6674     // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
6675     // compatibility issues between Java key expansion and SPARC crypto instructions
6676     Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
6677     if (original_k_start == NULL) return false;
6678 
6679     // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
6680     cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6681                                  OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6682                                  stubAddr, stubName, TypePtr::BOTTOM,
6683                                  src_start, dest_start, k_start, r_start, len, original_k_start);
6684   } else {
6685     // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
6686     cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6687                                  OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6688                                  stubAddr, stubName, TypePtr::BOTTOM,


6697 
6698 //------------------------------get_key_start_from_aescrypt_object-----------------------
6699 Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
6700 #ifdef PPC64
6701   // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys.
6702   // Intel's extension is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns.
6703   // However, ppc64 vncipher processes MixColumns and requires the same round keys with encryption.
6704   // The ppc64 stubs of encryption and decryption use the same round keys (sessionK[0]).
6705   Node* objSessionK = load_field_from_object(aescrypt_object, "sessionK", "[[I", /*is_exact*/ false);
6706   assert (objSessionK != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6707   if (objSessionK == NULL) {
6708     return (Node *) NULL;
6709   }
6710   Node* objAESCryptKey = load_array_element(control(), objSessionK, intcon(0), TypeAryPtr::OOPS);
6711 #else
6712   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
6713 #endif // PPC64
6714   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6715   if (objAESCryptKey == NULL) return (Node *) NULL;
6716 
6717   objAESCryptKey = shenandoah_read_barrier(objAESCryptKey);
6718 
6719   // now have the array, need to get the start address of the K array
6720   Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
6721   return k_start;
6722 }
6723 
6724 //------------------------------get_original_key_start_from_aescrypt_object-----------------------
6725 Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
6726   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
6727   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6728   if (objAESCryptKey == NULL) return (Node *) NULL;
6729 
6730   objAESCryptKey = shenandoah_read_barrier(objAESCryptKey);
6731 
6732   // now have the array, need to get the start address of the lastKey array
6733   Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
6734   return original_k_start;
6735 }
6736 
6737 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
6738 // Return node representing slow path of predicate check.
6739 // the pseudo code we want to emulate with this predicate is:
6740 // for encryption:
6741 //    if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6742 // for decryption:
6743 //    if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6744 //    note cipher==plain is more conservative than the original java code but that's OK
6745 //
6746 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
6747   // The receiver was checked for NULL already.
6748   Node* objCBC = argument(0);
6749 
6750   Node* src = argument(1);
6751   Node* dest = argument(4);
6752 
6753   // Load embeddedCipher field of CipherBlockChaining object.
6754   Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6755 
6756   // get AESCrypt klass for instanceOf check
6757   // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
6758   // will have same classloader as CipherBlockChaining object
6759   const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
6760   assert(tinst != NULL, "CBCobj is null");
6761   assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");
6762 
6763   // we want to do an instanceof comparison against the AESCrypt class
6764   ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6765   if (!klass_AESCrypt->is_loaded()) {
6766     // if AESCrypt is not even loaded, we never take the intrinsic fast path
6767     Node* ctrl = control();
6768     set_control(top()); // no regular fast path
6769     return ctrl;
6770   }
6771 
6772   // Resolve src and dest arrays for ShenandoahGC.  Here because new
6773   // memory state is not handled by predicate logic in
6774   // inline_cipherBlockChaining_AESCrypt itself
6775   src = shenandoah_cast_not_null(src);
6776   dest = shenandoah_cast_not_null(dest);
6777   src = shenandoah_write_barrier(src);
6778   dest = shenandoah_write_barrier(dest);
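       // Write barriers (rather than read barriers) are used on both arrays
       // here, presumably so that both are resolved to their to-space copies
       // and the src == dest pointer comparison below is exact.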
6779 
6780   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6781 
6782   Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6783   Node* cmp_instof  = _gvn.transform(new (C) CmpINode(instof, intcon(1)));
6784   Node* bool_instof  = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));
6785 
6786   Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6787 
6788   // for encryption, we are done
6789   if (!decrypting)
6790     return instof_false;  // even if it is NULL
6791 
6792   // for decryption, we need to add a further check to avoid
6793   // taking the intrinsic path when cipher and plain are the same
6794   // see the original java code for why.
6795   RegionNode* region = new(C) RegionNode(3);
6796   region->init_req(1, instof_false);
6797 

6798   Node* cmp_src_dest = _gvn.transform(new (C) CmpPNode(src, dest));
6799   Node* bool_src_dest = _gvn.transform(new (C) BoolNode(cmp_src_dest, BoolTest::eq));
6800   Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
6801   region->init_req(2, src_dest_conjoint);
6802 
6803   record_for_igvn(region);
6804   return _gvn.transform(region);
6805 }
6806 
6807 //------------------------------inline_sha_implCompress-----------------------
6808 //
6809 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
6810 // void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
6811 //
6812 // Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
6813 // void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
6814 //
6815 // Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
6816 // void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
6817 //
6818 bool LibraryCallKit::inline_sha_implCompress(vmIntrinsics::ID id) {
6819   assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
6820 
6821   Node* sha_obj = argument(0);
6822   Node* src     = argument(1); // type oop
6823   Node* ofs     = argument(2); // type int
6824 
6825   const Type* src_type = src->Value(&_gvn);
6826   const TypeAryPtr* top_src = src_type->isa_aryptr();
6827   if (top_src  == NULL || top_src->klass()  == NULL) {
6828     // failed array check
6829     return false;
6830   }
6831   // Figure out the size and type of the elements we will be copying.
6832   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6833   if (src_elem != T_BYTE) {
6834     return false;
6835   }
6836   // 'src_start' points to src array + offset
6837   src = cast_not_null(src, false);
6838   src = shenandoah_read_barrier(src);
6839   Node* src_start = array_element_address(src, ofs, src_elem);
6840   Node* state = NULL;
6841   address stubAddr;
6842   const char *stubName;
6843 
6844   switch(id) {
6845   case vmIntrinsics::_sha_implCompress:
6846     assert(UseSHA1Intrinsics, "need SHA1 instruction support");
6847     state = get_state_from_sha_object(sha_obj);
6848     stubAddr = StubRoutines::sha1_implCompress();
6849     stubName = "sha1_implCompress";
6850     break;
6851   case vmIntrinsics::_sha2_implCompress:
6852     assert(UseSHA256Intrinsics, "need SHA256 instruction support");
6853     state = get_state_from_sha_object(sha_obj);
6854     stubAddr = StubRoutines::sha256_implCompress();
6855     stubName = "sha256_implCompress";
6856     break;
6857   case vmIntrinsics::_sha5_implCompress:
6858     assert(UseSHA512Intrinsics, "need SHA512 instruction support");


6885   assert((uint)predicate < 3, "sanity");
6886   assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");
6887 
6888   Node* digestBase_obj = argument(0); // The receiver was checked for NULL already.
6889   Node* src            = argument(1); // byte[] array
6890   Node* ofs            = argument(2); // type int
6891   Node* limit          = argument(3); // type int
6892 
6893   const Type* src_type = src->Value(&_gvn);
6894   const TypeAryPtr* top_src = src_type->isa_aryptr();
6895   if (top_src  == NULL || top_src->klass()  == NULL) {
6896     // failed array check
6897     return false;
6898   }
6899   // Figure out the size and type of the elements we will be copying.
6900   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6901   if (src_elem != T_BYTE) {
6902     return false;
6903   }
6904   // 'src_start' points to src array + offset
6905   src = shenandoah_cast_not_null(src);
6906   src = shenandoah_read_barrier(src);
6907   Node* src_start = array_element_address(src, ofs, src_elem);
6908 
6909   const char* klass_SHA_name = NULL;
6910   const char* stub_name = NULL;
6911   address     stub_addr = NULL;
6912   bool        long_state = false;
6913 
6914   switch (predicate) {
6915   case 0:
6916     if (UseSHA1Intrinsics) {
6917       klass_SHA_name = "sun/security/provider/SHA";
6918       stub_name = "sha1_implCompressMB";
6919       stub_addr = StubRoutines::sha1_implCompressMB();
6920     }
6921     break;
6922   case 1:
6923     if (UseSHA256Intrinsics) {
6924       klass_SHA_name = "sun/security/provider/SHA2";
6925       stub_name = "sha256_implCompressMB";
6926       stub_addr = StubRoutines::sha256_implCompressMB();


6976                              src_start, state, ofs XTOP, limit XTOP);
6977   } else {
6978     call = make_runtime_call(RC_LEAF|RC_NO_FP,
6979                              OptoRuntime::digestBase_implCompressMB_Type(),
6980                              stubAddr, stubName, TypePtr::BOTTOM,
6981                              src_start, state, ofs, limit);
6982   }
6983   // return ofs (int)
6984   Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
6985   set_result(result);
6986 
6987   return true;
6988 }
6989 
6990 //------------------------------get_state_from_sha_object-----------------------
6991 Node * LibraryCallKit::get_state_from_sha_object(Node *sha_object) {
6992   Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
6993   assert (sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
6994   if (sha_state == NULL) return (Node *) NULL;
6995 
6996   sha_state = shenandoah_write_barrier(sha_state);
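       // The SHA stub updates the state array in place through a raw pointer,
       // so the array needs a write barrier before its element address is taken.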
6997 
6998   // now have the array, need to get the start address of the state array
6999   Node* state = array_element_address(sha_state, intcon(0), T_INT);
7000   return state;
7001 }
7002 
7003 //------------------------------get_state_from_sha5_object-----------------------
7004 Node * LibraryCallKit::get_state_from_sha5_object(Node *sha_object) {
7005   Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
7006   assert (sha_state != NULL, "wrong version of sun.security.provider.SHA5");
7007   if (sha_state == NULL) return (Node *) NULL;
7008 
7009   sha_state = shenandoah_write_barrier(sha_state);
7010 
7011   // now have the array, need to get the start address of the state array
7012   Node* state = array_element_address(sha_state, intcon(0), T_LONG);
7013   return state;
7014 }
7015 
7016 //----------------------------inline_digestBase_implCompressMB_predicate----------------------------
7017 // Return node representing slow path of predicate check.
7018 // the pseudo code we want to emulate with this predicate is:
7019 //    if (digestBaseObj instanceof SHA/SHA2/SHA5) do_intrinsic, else do_javapath
7020 //
7021 Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
7022   assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
7023          "need SHA1/SHA256/SHA512 instruction support");
7024   assert((uint)predicate < 3, "sanity");
7025 
7026   // The receiver was checked for NULL already.
7027   Node* digestBaseObj = argument(0);
7028 
7029   // get DigestBase klass for instanceOf check

