/*
 * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

#ifndef PATCHED_ADDR
#define PATCHED_ADDR  (max_jint)
#endif

void PhiResolverState::reset() {
  _virtual_operands.clear();
  _other_operands.clear();
  _vreg_table.clear();
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3

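// A usage sketch (operand names are illustrative; see move_to_phi() below
// for the real call site): moves are registered on a live resolver and only
// emitted, with any cycle broken via a temp, when it goes out of scope:
//
//   PhiResolver resolver(this);
//   resolver.move(opr_a, phi_opr_b);
//   resolver.move(opr_b, phi_opr_a);  // forms a cycle with the move above
//   // ~PhiResolver() emits: temp := a;  a := b;  b := temp
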
PhiResolver::PhiResolver(LIRGenerator* gen)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _loop(nullptr)
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset();
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e., for two assignments b := c, a := b, start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// For a cycle a := b, b := a, start with node a:
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands().at(i);
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands().at(i);
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table().at_put(vreg_num, node);
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((!CompilerConfig::is_c1_only_no_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


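// A pseudo-LIR sketch of what array_range_check() emits for a constant
// index (the register case below is symmetric, with operands and condition
// swapped):
//
//   cmp    [array + length_offset], index    // unsigned compare against length
//   branch belowEqual, RangeCheckStub        // length <= index ==> out of bounds
//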
void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                    CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, stub); // forward branch
  }
  __ move(index, result);
}



void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

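  // On two-operand machines (TwoOperandLIRForm, e.g. x86) the result operand
  // must also be the left input, as in x := x + y; copy left into result
  // first so the incoming left value is not clobbered.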
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:  __ mul(left_op, right_op, result_op, tmp_op); break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          jint c = right->as_jint();
          if (c > 0 && is_power_of_2(c)) {
            // do not need tmp here
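            // e.g. x * 8  ==>  x << 3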
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv: __ div(left_op, right_op, result_op, tmp_op); break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {

  if (TwoOperandLIRForm && value != result_op
      // Only 32bit right shifts require two operand form on S390.
      S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
    tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() > 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which end up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dst positions are the same, or dst_pos is zero, so assume a
    // non-overlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (strict_fp_requires_explicit_rounding) {
#ifdef IA32
    if (UseSSE < 1 && opr->is_single_fpu()) {
      LIR_Opr result = new_register(T_FLOAT);
      set_vreg_flag(result, must_start_in_memory);
      assert(opr->is_register(), "only a register can be spilled");
      assert(opr->value_type()->is_float(), "rounding only for floats available");
      __ roundfp(opr, LIR_OprFact::illegalOpr, result);
      return result;
    }
#else
    Unimplemented();
#endif // IA32
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double, ...) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
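//
// A small sketch (block and value names are illustrative): if block B3 has
// predecessors B1 and B2 and a local is assigned in both, B3's state holds
// phi = [v1 from B1, v2 from B2]. At the end of B1, move_to_phi() registers
// the move v1 -> phi with the resolver; at the end of B2 it registers
// v2 -> phi; code in B3 then simply uses phi's operand.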


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    if (phi->is_local()) {
      for (int i = 0; i < phi->operand_count(); i++) {
        Value op = phi->operand_at(i);
        if (op != NULL && op->type()->is_illegal()) {
          bailout("illegal phi operand");
        }
      }
    }
    Phi* cur_phi = cur_val->as_Phi();
    if (cur_phi != NULL && cur_phi->is_illegal()) {
      // Phi and local would need to get invalidated
      // (which is unexpected for Linear Scan).
      // But this case is very rare so we simply bail out.
      bailout("propagation of illegal phi");
      return;
    }
    LIR_Opr operand = cur_val->operand();
    if (operand->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      PhiResolver resolver(this);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg_num = _virtual_register_number;
  // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
  // a few extra registers before we really run out, which helps us avoid tripping over assertions.
  if (vreg_num + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers in LIR generator");
    if (vreg_num + 2 >= LIR_OprDesc::vreg_max) {
      // Wrap it around and continue until the bailout really happens, to avoid hitting assertions.
      _virtual_register_number = LIR_OprDesc::vreg_base;
      vreg_num = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  LIR_Opr vreg = LIR_OprFact::virtual_register(vreg_num, type);
  assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
  return vreg;
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   if (!phi->is_illegal()) { operand_for_instruction(phi); });

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Opr result = rlock_result(x, T_OBJECT);
  access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
                 reference, LIR_OprFact::intConst(referent_offset), result,
                 nullptr, info);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

void LIRGenerator::load_klass(LIR_Opr obj, LIR_Opr klass, CodeEmitInfo* null_check_info) {
  CodeStub* slow_path = UseCompactObjectHeaders ? new LoadKlassStub(klass) : NULL;
  __ load_klass(obj, klass, null_check_info, slow_path);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_ADDRESS);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Opr klass = new_register(T_METADATA);
  load_klass(rcvr.result(), klass, info);
  __ move_wide(new LIR_Address(klass, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
  // mirror = ((OopHandle)mirror)->resolve();
  access_load(IN_NATIVE, T_OBJECT,
              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
}

// java.lang.Class::isPrimitive()
void LIRGenerator::do_isPrimitive(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

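  // The klass field of a primitive type's Java mirror is null (the same
  // test is used in do_getModifiers() below), so a null klass here means
  // the answer to isPrimitive() is true.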
  __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset(), T_ADDRESS), temp, info);
  __ cmp(lir_cond_notEqual, temp, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
}

// Example: Foo.class.getModifiers()
void LIRGenerator::do_getModifiers(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem receiver(x->argument_at(0), this);
  receiver.load_item();
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // While reading off the universal constant mirror is less efficient than doing
  // another branch and returning the constant answer, this branchless code runs into
  // much less risk of confusion for the C1 register allocator. The choice of the universe
  // object here is correct as long as it returns the same modifiers we would expect
  // from the primitive class itself. See the spec for Class.getModifiers, which gives
  // the typed array klasses the same modifiers as their component types.

  Klass* univ_klass_obj = Universe::byteArrayKlassObj();
  assert(univ_klass_obj->modifier_flags() == (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC), "Sanity");
  LIR_Opr prim_klass = LIR_OprFact::metadataConst(univ_klass_obj);

  LIR_Opr recv_klass = new_register(T_METADATA);
  __ move(new LIR_Address(receiver.result(), java_lang_Class::klass_offset(), T_ADDRESS), recv_klass, info);

  // Check if this is a Java mirror of primitive type, and select the appropriate klass.
  LIR_Opr klass = new_register(T_METADATA);
  __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);

  // Get the answer.
  __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), result);
}

// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr temp = new_register(T_ADDRESS);
  LIR_Opr reg = rlock_result(x);
  __ move(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_ADDRESS), temp);
  // threadObj = ((OopHandle)_threadObj)->resolve();
  access_load(IN_NATIVE, T_OBJECT,
              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), reg);
}

void LIRGenerator::do_getObjectSize(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIR_Opr result_reg = rlock_result(x);

  LIRItem value(x->argument_at(2), this);
  value.load_item();

  LIR_Opr klass = new_register(T_METADATA);
  load_klass(value.result(), klass, NULL);
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);

  LabelObj* L_done = new LabelObj();
  LabelObj* L_array = new LabelObj();

  __ cmp(lir_cond_lessEqual, layout, 0);
  __ branch(lir_cond_lessEqual, L_array->label());

  // Instance case: the layout helper gives us instance size almost directly,
  // but we need to mask out the _lh_instance_slow_path_bit.
  __ convert(Bytecodes::_i2l, layout, result_reg);

  assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
  jlong mask = ~(jlong) right_n_bits(LogBytesPerLong);
  __ logical_and(result_reg, LIR_OprFact::longConst(mask), result_reg);

  __ branch(lir_cond_always, L_done->label());

  // Array case: size is round(header + element_size*arraylength).
  // Since arraylength is different for every array instance, we have to
  // compute the whole thing at runtime.

  __ branch_destination(L_array->label());

  int round_mask = MinObjAlignmentInBytes - 1;

  // Figure out header sizes first.
  LIR_Opr hss = LIR_OprFact::intConst(Klass::_lh_header_size_shift);
  LIR_Opr hsm = LIR_OprFact::intConst(Klass::_lh_header_size_mask);

  LIR_Opr header_size = new_register(T_INT);
  __ move(layout, header_size);
  LIR_Opr tmp = new_register(T_INT);
  __ unsigned_shift_right(header_size, hss, header_size, tmp);
  __ logical_and(header_size, hsm, header_size);
  __ add(header_size, LIR_OprFact::intConst(round_mask), header_size);

  // Figure out the array length in bytes
  assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
  LIR_Opr l2esm = LIR_OprFact::intConst(Klass::_lh_log2_element_size_mask);
  __ logical_and(layout, l2esm, layout);

  LIR_Opr length_int = new_register(T_INT);
  __ move(new LIR_Address(value.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), length_int);

#ifdef _LP64
  LIR_Opr length = new_register(T_LONG);
  __ convert(Bytecodes::_i2l, length_int, length);
#endif

  // Shift-left awkwardness. Normally it is just:
  //   __ shift_left(length, layout, length);
  // But C1 cannot perform shift_left with non-constant count, so we end up
  // doing the per-bit loop dance here. x86_32 also does not know how to shift
  // longs, so we have to act on ints.
  LabelObj* L_shift_loop = new LabelObj();
  LabelObj* L_shift_exit = new LabelObj();

  __ branch_destination(L_shift_loop->label());
  __ cmp(lir_cond_equal, layout, 0);
  __ branch(lir_cond_equal, L_shift_exit->label());

#ifdef _LP64
  __ shift_left(length, 1, length);
#else
  __ shift_left(length_int, 1, length_int);
#endif

  __ sub(layout, LIR_OprFact::intConst(1), layout);

  __ branch(lir_cond_always, L_shift_loop->label());
  __ branch_destination(L_shift_exit->label());

  // Mix all up, round, and push to the result.
#ifdef _LP64
  LIR_Opr header_size_long = new_register(T_LONG);
  __ convert(Bytecodes::_i2l, header_size, header_size_long);
  __ add(length, header_size_long, length);
  if (round_mask != 0) {
    __ logical_and(length, LIR_OprFact::longConst(~round_mask), length);
  }
  __ move(length, result_reg);
#else
  __ add(length_int, header_size, length_int);
  if (round_mask != 0) {
    __ logical_and(length_int, LIR_OprFact::intConst(~round_mask), length_int);
  }
  __ convert(Bytecodes::_i2l, length_int, result_reg);
#endif

  __ branch_destination(L_done->label());
}

1457 void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
1458   assert(x->number_of_arguments() == 1, "wrong type");
1459   LIRItem receiver(x->argument_at(0), this);
1460 
1461   receiver.load_item();
1462   BasicTypeList signature;
1463   signature.append(T_OBJECT); // receiver
1464   LIR_OprList* args = new LIR_OprList();
1465   args->append(receiver.result());
1466   CodeEmitInfo* info = state_for(x, x->state());
1467   call_runtime(&signature, args,
1468                CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
1469                voidType, info);
1470 
1471   set_no_result(x);
1472 }
1473 
1474 
1475 //------------------------local access--------------------------------------
1476 
1477 LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1478   if (x->operand()->is_illegal()) {
1479     Constant* c = x->as_Constant();
1480     if (c != NULL) {
1481       x->set_operand(LIR_OprFact::value_type(c->type()));
1482     } else {
1483       assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
1484       // allocate a virtual register for this local or phi
1485       x->set_operand(rlock(x));
1486       _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
1487     }
1488   }
1489   return x->operand();
1490 }
1491 
1492 
1493 Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
1494   if (opr->is_virtual()) {
1495     return instruction_for_vreg(opr->vreg_number());
1496   }
1497   return NULL;
1498 }
1499 
1500 
1501 Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1502   if (reg_num < _instruction_for_operand.length()) {
1503     return _instruction_for_operand.at(reg_num);
1504   }
1505   return NULL;
1506 }
1507 
1508 
1509 void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
1510   if (_vreg_flags.size_in_bits() == 0) {
1511     BitMap2D temp(100, num_vreg_flags);
1512     _vreg_flags = temp;
1513   }
1514   _vreg_flags.at_put_grow(vreg_num, f, true);
1515 }
1516 
1517 bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1518   if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1519     return false;
1520   }
1521   return _vreg_flags.at(vreg_num, f);
1522 }
1523 
1524 
1525 // Block-local constant handling.  This code is useful for keeping
1526 // unpinned constants, and constants which aren't exposed in the IR,
1527 // in registers.  Unpinned Constant instructions have their operands
1528 // cleared when the block is finished so that other blocks can't end
1529 // up referring to their registers.
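     //
     // For example, two uses of the integer constant 17 in the same block are
     // folded to a single virtual register by load_constant() below; the
     // _constants and _reg_for_constants tables are cleared at block boundaries
     // (see block_do_epilog()), so the shared registers cannot leak into other
     // blocks.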
1530 
1531 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1532   assert(!x->is_pinned(), "only for unpinned constants");
1533   _unpinned_constants.append(x);
1534   return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1535 }
1536 
1537 
1538 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1539   BasicType t = c->type();
1540   for (int i = 0; i < _constants.length(); i++) {
1541     LIR_Const* other = _constants.at(i);
1542     if (t == other->type()) {
1543       switch (t) {
1544       case T_INT:
1545       case T_FLOAT:
1546         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1547         break;
1548       case T_LONG:
1549       case T_DOUBLE:
1550         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1551         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1552         break;
1553       case T_OBJECT:
1554         if (c->as_jobject() != other->as_jobject()) continue;
1555         break;
1556       default:
1557         break;
1558       }
1559       return _reg_for_constants.at(i);
1560     }
1561   }
1562 
1563   LIR_Opr result = new_register(t);
1564   __ move((LIR_Opr)c, result);
1565   _constants.append(c);
1566   _reg_for_constants.append(result);
1567   return result;
1568 }
1569 
1570 //------------------------field access--------------------------------------
1571 
1572 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1573   assert(x->number_of_arguments() == 4, "wrong type");
1574   LIRItem obj   (x->argument_at(0), this);  // object
1575   LIRItem offset(x->argument_at(1), this);  // offset of field
1576   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1577   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1578   assert(obj.type()->tag() == objectTag, "invalid type");
1579   assert(cmp.type()->tag() == type->tag(), "invalid type");
1580   assert(val.type()->tag() == type->tag(), "invalid type");
1581 
1582   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1583                                             obj, offset, cmp, val);
1584   set_result(x, result);
1585 }
1586 
1587 // Comment copied from templateTable_i486.cpp
1588 // ----------------------------------------------------------------------------
1589 // Volatile variables demand their effects be made known to all CPUs in
1590 // order.  Store buffers on most chips allow reads & writes to reorder; the
1591 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1592 // memory barrier (i.e., it's not sufficient that the interpreter does not
1593 // reorder volatile references, the hardware also must not reorder them).
1594 //
1595 // According to the new Java Memory Model (JMM):
1596 // (1) All volatiles are serialized wrt each other.
1597 // ALSO reads & writes act as acquire & release, so:
1598 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1599 // the read float up to before the read.  It's OK for non-volatile memory refs
1600 // that happen before the volatile read to float down below it.
1601 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1602 // that happen BEFORE the write float down to after the write.  It's OK for
1603 // non-volatile memory refs that happen after the volatile write to float up
1604 // before it.
1605 //
1606 // We only put in barriers around volatile refs (they are expensive), not
1607 // _between_ memory refs (that would require us to track the flavor of the
1608 // previous memory refs).  Requirements (2) and (3) require some barriers
1609 // before volatile stores and after volatile loads.  These nearly cover
1610 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1611 // case is placed after volatile-stores although it could just as well go
1612 // before volatile-loads.
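     //
     // Schematically, for a MO_SEQ_CST (volatile) access the barrier set emits
     // roughly:
     //   volatile load:   load; membar_acquire
     //   volatile store:  membar_release; store; membar
     // Platforms whose memory model already provides the required ordering may
     // elide some of these barriers.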
1613 
1614 
1615 void LIRGenerator::do_StoreField(StoreField* x) {
1616   bool needs_patching = x->needs_patching();
1617   bool is_volatile = x->field()->is_volatile();
1618   BasicType field_type = x->field_type();
1619 
1620   CodeEmitInfo* info = NULL;
1621   if (needs_patching) {
1622     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1623     info = state_for(x, x->state_before());
1624   } else if (x->needs_null_check()) {
1625     NullCheck* nc = x->explicit_null_check();
1626     if (nc == NULL) {
1627       info = state_for(x);
1628     } else {
1629       info = state_for(nc);
1630     }
1631   }
1632 
1633   LIRItem object(x->obj(), this);
1634   LIRItem value(x->value(),  this);
1635 
1636   object.load_item();
1637 
1638   if (is_volatile || needs_patching) {
1639     // load item if field is volatile (fewer special cases for volatiles)
1640     // load item if field not initialized
1641     // load item if field not constant
1642     // because of code patching we cannot inline constants
1643     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1644       value.load_byte_item();
1645     } else {
1646       value.load_item();
1647     }
1648   } else {
1649     value.load_for_store(field_type);
1650   }
1651 
1652   set_no_result(x);
1653 
1654 #ifndef PRODUCT
1655   if (PrintNotLoaded && needs_patching) {
1656     tty->print_cr("   ###class not loaded at store_%s bci %d",
1657                   x->is_static() ?  "static" : "field", x->printable_bci());
1658   }
1659 #endif
1660 
1661   if (x->needs_null_check() &&
1662       (needs_patching ||
1663        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1664     // Emit an explicit null check because the offset is too large.
1665     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1666     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1667     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1668   }
1669 
1670   DecoratorSet decorators = IN_HEAP;
1671   if (is_volatile) {
1672     decorators |= MO_SEQ_CST;
1673   }
1674   if (needs_patching) {
1675     decorators |= C1_NEEDS_PATCHING;
1676   }
1677 
1678   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1679                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1680 }
1681 
1682 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1683   assert(x->is_pinned(), "");
1684   bool needs_range_check = x->compute_needs_range_check();
1685   bool use_length = x->length() != NULL;
1686   bool obj_store = is_reference_type(x->elt_type());
1687   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
1688                                          !get_jobject_constant(x->value())->is_null_object() ||
1689                                          x->should_profile());
1690 
1691   LIRItem array(x->array(), this);
1692   LIRItem index(x->index(), this);
1693   LIRItem value(x->value(), this);
1694   LIRItem length(this);
1695 
1696   array.load_item();
1697   index.load_nonconstant();
1698 
1699   if (use_length && needs_range_check) {
1700     length.set_instruction(x->length());
1701     length.load_item();
1703   }
1704   if (needs_store_check || x->check_boolean()) {
1705     value.load_item();
1706   } else {
1707     value.load_for_store(x->elt_type());
1708   }
1709 
1710   set_no_result(x);
1711 
1712   // the CodeEmitInfo must be duplicated for each different
1713   // LIR instruction because spilling can occur anywhere between two
1714   // instructions, and so the debug information must be different.
1715   CodeEmitInfo* range_check_info = state_for(x);
1716   CodeEmitInfo* null_check_info = NULL;
1717   if (x->needs_null_check()) {
1718     null_check_info = new CodeEmitInfo(range_check_info);
1719   }
1720 
1721   if (GenerateRangeChecks && needs_range_check) {
1722     if (use_length) {
1723       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1724       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1725     } else {
1726       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1727       // range_check also does the null check
1728       null_check_info = NULL;
1729     }
1730   }
1731 
1732   if (GenerateArrayStoreCheck && needs_store_check) {
1733     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1734     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1735   }
1736 
1737   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1738   if (x->check_boolean()) {
1739     decorators |= C1_MASK_BOOLEAN;
1740   }
1741 
1742   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1743                   NULL, null_check_info);
1744 }
1745 
1746 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1747                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1748                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1749   decorators |= ACCESS_READ;
1750   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1751   if (access.is_raw()) {
1752     _barrier_set->BarrierSetC1::load_at(access, result);
1753   } else {
1754     _barrier_set->load_at(access, result);
1755   }
1756 }
1757 
1758 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1759                                LIR_Opr addr, LIR_Opr result) {
1760   decorators |= ACCESS_READ;
1761   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1762   access.set_resolved_addr(addr);
1763   if (access.is_raw()) {
1764     _barrier_set->BarrierSetC1::load(access, result);
1765   } else {
1766     _barrier_set->load(access, result);
1767   }
1768 }
1769 
1770 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
1771                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
1772                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
1773   decorators |= ACCESS_WRITE;
1774   LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
1775   if (access.is_raw()) {
1776     _barrier_set->BarrierSetC1::store_at(access, value);
1777   } else {
1778     _barrier_set->store_at(access, value);
1779   }
1780 }
1781 
1782 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
1783                                                LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
1784   decorators |= ACCESS_READ;
1785   decorators |= ACCESS_WRITE;
1786   // Atomic operations are SEQ_CST by default
1787   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1788   LIRAccess access(this, decorators, base, offset, type);
1789   if (access.is_raw()) {
1790     return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
1791   } else {
1792     return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
1793   }
1794 }
1795 
1796 LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
1797                                             LIRItem& base, LIRItem& offset, LIRItem& value) {
1798   decorators |= ACCESS_READ;
1799   decorators |= ACCESS_WRITE;
1800   // Atomic operations are SEQ_CST by default
1801   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1802   LIRAccess access(this, decorators, base, offset, type);
1803   if (access.is_raw()) {
1804     return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
1805   } else {
1806     return _barrier_set->atomic_xchg_at(access, value);
1807   }
1808 }
1809 
1810 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
1811                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
1812   decorators |= ACCESS_READ;
1813   decorators |= ACCESS_WRITE;
1814   // Atomic operations are SEQ_CST by default
1815   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1816   LIRAccess access(this, decorators, base, offset, type);
1817   if (access.is_raw()) {
1818     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
1819   } else {
1820     return _barrier_set->atomic_add_at(access, value);
1821   }
1822 }
1823 
1824 void LIRGenerator::do_LoadField(LoadField* x) {
1825   bool needs_patching = x->needs_patching();
1826   bool is_volatile = x->field()->is_volatile();
1827   BasicType field_type = x->field_type();
1828 
1829   CodeEmitInfo* info = NULL;
1830   if (needs_patching) {
1831     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1832     info = state_for(x, x->state_before());
1833   } else if (x->needs_null_check()) {
1834     NullCheck* nc = x->explicit_null_check();
1835     if (nc == NULL) {
1836       info = state_for(x);
1837     } else {
1838       info = state_for(nc);
1839     }
1840   }
1841 
1842   LIRItem object(x->obj(), this);
1843 
1844   object.load_item();
1845 
1846 #ifndef PRODUCT
1847   if (PrintNotLoaded && needs_patching) {
1848     tty->print_cr("   ###class not loaded at load_%s bci %d",
1849                   x->is_static() ?  "static" : "field", x->printable_bci());
1850   }
1851 #endif
1852 
1853   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1854   if (x->needs_null_check() &&
1855       (needs_patching ||
1856        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1857        stress_deopt)) {
1858     LIR_Opr obj = object.result();
1859     if (stress_deopt) {
1860       obj = new_register(T_OBJECT);
1861       __ move(LIR_OprFact::oopConst(NULL), obj);
1862     }
1863     // Emit an explicit null check because the offset is too large.
1864     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1865     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1866     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1867   }
1868 
1869   DecoratorSet decorators = IN_HEAP;
1870   if (is_volatile) {
1871     decorators |= MO_SEQ_CST;
1872   }
1873   if (needs_patching) {
1874     decorators |= C1_NEEDS_PATCHING;
1875   }
1876 
1877   LIR_Opr result = rlock_result(x, field_type);
1878   access_load_at(decorators, field_type,
1879                  object, LIR_OprFact::intConst(x->offset()), result,
1880                  info ? new CodeEmitInfo(info) : NULL, info);
1881 }
1882 
1883 
1884 //------------------------java.nio.Buffer.checkIndex------------------------
1885 
1886 // int java.nio.Buffer.checkIndex(int)
1887 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1888   // NOTE: by the time we are in checkIndex() we are guaranteed that
1889   // the buffer is non-null (because checkIndex is package-private and
1890   // only called from within other methods in the buffer).
1891   assert(x->number_of_arguments() == 2, "wrong type");
1892   LIRItem buf  (x->argument_at(0), this);
1893   LIRItem index(x->argument_at(1), this);
1894   buf.load_item();
1895   index.load_item();
1896 
1897   LIR_Opr result = rlock_result(x);
1898   if (GenerateRangeChecks) {
1899     CodeEmitInfo* info = state_for(x);
1900     CodeStub* stub = new RangeCheckStub(info, index.result());
1901     if (index.result()->is_constant()) {
1902       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1903       __ branch(lir_cond_belowEqual, stub);
1904     } else {
1905       cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1906                   java_nio_Buffer::limit_offset(), T_INT, info);
1907       __ branch(lir_cond_aboveEqual, stub);
1908     }
1909     __ move(index.result(), result);
1910   } else {
1911     // Just load the index into the result register
1912     __ move(index.result(), result);
1913   }
1914 }
1915 
1916 
1917 //------------------------array access--------------------------------------
1918 
1919 
1920 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1921   LIRItem array(x->array(), this);
1922   array.load_item();
1923   LIR_Opr reg = rlock_result(x);
1924 
1925   CodeEmitInfo* info = NULL;
1926   if (x->needs_null_check()) {
1927     NullCheck* nc = x->explicit_null_check();
1928     if (nc == NULL) {
1929       info = state_for(x);
1930     } else {
1931       info = state_for(nc);
1932     }
1933     if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1934       LIR_Opr obj = new_register(T_OBJECT);
1935       __ move(LIR_OprFact::oopConst(NULL), obj);
1936       __ null_check(obj, new CodeEmitInfo(info));
1937     }
1938   }
1939   __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1940 }
1941 
1942 
1943 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1944   bool use_length = x->length() != NULL;
1945   LIRItem array(x->array(), this);
1946   LIRItem index(x->index(), this);
1947   LIRItem length(this);
1948   bool needs_range_check = x->compute_needs_range_check();
1949 
1950   if (use_length && needs_range_check) {
1951     length.set_instruction(x->length());
1952     length.load_item();
1953   }
1954 
1955   array.load_item();
1956   if (index.is_constant() && can_inline_as_constant(x->index())) {
1957     // let it be a constant
1958     index.dont_load_item();
1959   } else {
1960     index.load_item();
1961   }
1962 
1963   CodeEmitInfo* range_check_info = state_for(x);
1964   CodeEmitInfo* null_check_info = NULL;
1965   if (x->needs_null_check()) {
1966     NullCheck* nc = x->explicit_null_check();
1967     if (nc != NULL) {
1968       null_check_info = state_for(nc);
1969     } else {
1970       null_check_info = range_check_info;
1971     }
1972     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1973       LIR_Opr obj = new_register(T_OBJECT);
1974       __ move(LIR_OprFact::oopConst(NULL), obj);
1975       __ null_check(obj, new CodeEmitInfo(null_check_info));
1976     }
1977   }
1978 
1979   if (GenerateRangeChecks && needs_range_check) {
1980     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1981       __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
1982     } else if (use_length) {
1983       // TODO: use a (modified) version of array_range_check that does not require a
1984       //       constant length to be loaded to a register
1985       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1986       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1987     } else {
1988       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1989       // The range check performs the null check, so clear it out for the load
1990       null_check_info = NULL;
1991     }
1992   }
1993 
1994   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1995 
1996   LIR_Opr result = rlock_result(x, x->elt_type());
1997   access_load_at(decorators, x->elt_type(),
1998                  array, index.result(), result,
1999                  NULL, null_check_info);
2000 }
2001 
2002 
2003 void LIRGenerator::do_NullCheck(NullCheck* x) {
2004   if (x->can_trap()) {
2005     LIRItem value(x->obj(), this);
2006     value.load_item();
2007     CodeEmitInfo* info = state_for(x);
2008     __ null_check(value.result(), info);
2009   }
2010 }
2011 
2012 
2013 void LIRGenerator::do_TypeCast(TypeCast* x) {
2014   LIRItem value(x->obj(), this);
2015   value.load_item();
2016   // the result is the same as from the node we are casting
2017   set_result(x, value.result());
2018 }
2019 
2020 
2021 void LIRGenerator::do_Throw(Throw* x) {
2022   LIRItem exception(x->exception(), this);
2023   exception.load_item();
2024   set_no_result(x);
2025   LIR_Opr exception_opr = exception.result();
2026   CodeEmitInfo* info = state_for(x, x->state());
2027 
2028 #ifndef PRODUCT
2029   if (PrintC1Statistics) {
2030     increment_counter(Runtime1::throw_count_address(), T_INT);
2031   }
2032 #endif
2033 
2034   // check if the instruction has an xhandler in any of the nested scopes
2035   bool unwind = false;
2036   if (info->exception_handlers()->length() == 0) {
2037     // this throw is not inside an xhandler
2038     unwind = true;
2039   } else {
2040     // get some idea of the throw type
2041     bool type_is_exact = true;
2042     ciType* throw_type = x->exception()->exact_type();
2043     if (throw_type == NULL) {
2044       type_is_exact = false;
2045       throw_type = x->exception()->declared_type();
2046     }
2047     if (throw_type != NULL && throw_type->is_instance_klass()) {
2048       ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2049       unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2050     }
2051   }
2052 
2053   // do null check before moving exception oop into fixed register
2054   // to avoid a fixed interval with an oop during the null check.
2055   // Use a copy of the CodeEmitInfo because debug information is
2056   // different for null_check and throw.
2057   if (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL) {
2058     // if the exception object wasn't created using new then it might be null.
2059     __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2060   }
2061 
2062   if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2063     // we need to go through the exception lookup path to get JVMTI
2064     // notification done
2065     unwind = false;
2066   }
2067 
2068   // move exception oop into fixed register
2069   __ move(exception_opr, exceptionOopOpr());
2070 
2071   if (unwind) {
2072     __ unwind_exception(exceptionOopOpr());
2073   } else {
2074     __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2075   }
2076 }
2077 
2078 
2079 void LIRGenerator::do_RoundFP(RoundFP* x) {
2080   assert(strict_fp_requires_explicit_rounding, "not required");
2081 
2082   LIRItem input(x->input(), this);
2083   input.load_item();
2084   LIR_Opr input_opr = input.result();
2085   assert(input_opr->is_register(), "why round if value is not in a register?");
2086   assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2087   if (input_opr->is_single_fpu()) {
2088     set_result(x, round_item(input_opr)); // This code path not currently taken
2089   } else {
2090     LIR_Opr result = new_register(T_DOUBLE);
2091     set_vreg_flag(result, must_start_in_memory);
2092     __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2093     set_result(x, result);
2094   }
2095 }
2096 
2097 // UnsafeGetRaw may have x->base() and x->index() be int or long on both
2098 // 64-bit and 32-bit platforms. x->base() is expected to always be long on 64-bit.
2099 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2100   LIRItem base(x->base(), this);
2101   LIRItem idx(this);
2102 
2103   base.load_item();
2104   if (x->has_index()) {
2105     idx.set_instruction(x->index());
2106     idx.load_nonconstant();
2107   }
2108 
2109   LIR_Opr reg = rlock_result(x, x->basic_type());
2110 
2111   int   log2_scale = 0;
2112   if (x->has_index()) {
2113     log2_scale = x->log2_scale();
2114   }
2115 
2116   assert(!x->has_index() || idx.value() == x->index(), "should match");
2117 
2118   LIR_Opr base_op = base.result();
2119   LIR_Opr index_op = idx.result();
2120 #ifndef _LP64
2121   if (base_op->type() == T_LONG) {
2122     base_op = new_register(T_INT);
2123     __ convert(Bytecodes::_l2i, base.result(), base_op);
2124   }
2125   if (x->has_index()) {
2126     if (index_op->type() == T_LONG) {
2127       LIR_Opr long_index_op = index_op;
2128       if (index_op->is_constant()) {
2129         long_index_op = new_register(T_LONG);
2130         __ move(index_op, long_index_op);
2131       }
2132       index_op = new_register(T_INT);
2133       __ convert(Bytecodes::_l2i, long_index_op, index_op);
2134     } else {
2135       assert(x->index()->type()->tag() == intTag, "must be");
2136     }
2137   }
2138   // At this point base and index should both be ints.
2139   assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2140   assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
2141 #else
2142   if (x->has_index()) {
2143     if (index_op->type() == T_INT) {
2144       if (!index_op->is_constant()) {
2145         index_op = new_register(T_LONG);
2146         __ convert(Bytecodes::_i2l, idx.result(), index_op);
2147       }
2148     } else {
2149       assert(index_op->type() == T_LONG, "must be");
2150       if (index_op->is_constant()) {
2151         index_op = new_register(T_LONG);
2152         __ move(idx.result(), index_op);
2153       }
2154     }
2155   }
2156   // At this point base is a non-constant long.
2157   // Index is a long register or an int constant.
2158   // We allow the constant to stay an int because that enables a more compact encoding by
2159   // embedding an immediate offset in the address expression. If we have a long constant, we have to
2160   // move it into a register first.
2161   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2162   assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2163                             (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2164 #endif
2165 
2166   BasicType dst_type = x->basic_type();
2167 
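       // Build the address: either base plus a constant displacement, or base
       // plus a (possibly scaled) index. x86 can encode the scaled index
       // directly in a single LIR_Address; other platforms shift the index
       // into a temporary register first.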
2168   LIR_Address* addr;
2169   if (index_op->is_constant()) {
2170     assert(log2_scale == 0, "must not have a scale");
2171     assert(index_op->type() == T_INT, "only int constants supported");
2172     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2173   } else {
2174 #ifdef X86
2175     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2176 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2177     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2178 #else
2179     if (index_op->is_illegal() || log2_scale == 0) {
2180       addr = new LIR_Address(base_op, index_op, dst_type);
2181     } else {
2182       LIR_Opr tmp = new_pointer_register();
2183       __ shift_left(index_op, log2_scale, tmp);
2184       addr = new LIR_Address(base_op, tmp, dst_type);
2185     }
2186 #endif
2187   }
2188 
2189   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2190     __ unaligned_move(addr, reg);
2191   } else {
2192     if (dst_type == T_OBJECT && x->is_wide()) {
2193       __ move_wide(addr, reg);
2194     } else {
2195       __ move(addr, reg);
2196     }
2197   }
2198 }
2199 
2200 
2201 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2202   int  log2_scale = 0;
2203   BasicType type = x->basic_type();
2204 
2205   if (x->has_index()) {
2206     log2_scale = x->log2_scale();
2207   }
2208 
2209   LIRItem base(x->base(), this);
2210   LIRItem value(x->value(), this);
2211   LIRItem idx(this);
2212 
2213   base.load_item();
2214   if (x->has_index()) {
2215     idx.set_instruction(x->index());
2216     idx.load_item();
2217   }
2218 
2219   if (type == T_BYTE || type == T_BOOLEAN) {
2220     value.load_byte_item();
2221   } else {
2222     value.load_item();
2223   }
2224 
2225   set_no_result(x);
2226 
2227   LIR_Opr base_op = base.result();
2228   LIR_Opr index_op = idx.result();
2229 
2230 #ifdef GENERATE_ADDRESS_IS_PREFERRED
2231   LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
2232 #else
2233 #ifndef _LP64
2234   if (base_op->type() == T_LONG) {
2235     base_op = new_register(T_INT);
2236     __ convert(Bytecodes::_l2i, base.result(), base_op);
2237   }
2238   if (x->has_index()) {
2239     if (index_op->type() == T_LONG) {
2240       index_op = new_register(T_INT);
2241       __ convert(Bytecodes::_l2i, idx.result(), index_op);
2242     }
2243   }
2244   // At this point base and index should both be non-constant ints.
2245   assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2246   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
2247 #else
2248   if (x->has_index()) {
2249     if (index_op->type() == T_INT) {
2250       index_op = new_register(T_LONG);
2251       __ convert(Bytecodes::_i2l, idx.result(), index_op);
2252     }
2253   }
2254   // At this point base and index are long and non-constant
2255   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2256   assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2257 #endif
2258 
2259   if (log2_scale != 0) {
2260     // temporary fix (platform dependent code without shift on Intel would be better)
2261     // TODO: ARM also allows embedded shift in the address
2262     LIR_Opr tmp = new_pointer_register();
2263     if (TwoOperandLIRForm) {
2264       __ move(index_op, tmp);
2265       index_op = tmp;
2266     }
2267     __ shift_left(index_op, log2_scale, tmp);
2268     if (!TwoOperandLIRForm) {
2269       index_op = tmp;
2270     }
2271   }
2272 
2273   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2274 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2275   __ move(value.result(), addr);
2276 }
2277 
2278 
2279 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2280   BasicType type = x->basic_type();
2281   LIRItem src(x->object(), this);
2282   LIRItem off(x->offset(), this);
2283 
2284   off.load_item();
2285   src.load_item();
2286 
2287   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2288 
2289   if (x->is_volatile()) {
2290     decorators |= MO_SEQ_CST;
2291   }
2292   if (type == T_BOOLEAN) {
2293     decorators |= C1_MASK_BOOLEAN;
2294   }
2295   if (is_reference_type(type)) {
2296     decorators |= ON_UNKNOWN_OOP_REF;
2297   }
2298 
2299   LIR_Opr result = rlock_result(x, type);
2300   access_load_at(decorators, type,
2301                  src, off.result(), result);
2302 }
2303 
2304 
2305 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2306   BasicType type = x->basic_type();
2307   LIRItem src(x->object(), this);
2308   LIRItem off(x->offset(), this);
2309   LIRItem data(x->value(), this);
2310 
2311   src.load_item();
2312   if (type == T_BOOLEAN || type == T_BYTE) {
2313     data.load_byte_item();
2314   } else {
2315     data.load_item();
2316   }
2317   off.load_item();
2318 
2319   set_no_result(x);
2320 
2321   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2322   if (is_reference_type(type)) {
2323     decorators |= ON_UNKNOWN_OOP_REF;
2324   }
2325   if (x->is_volatile()) {
2326     decorators |= MO_SEQ_CST;
2327   }
2328   access_store_at(decorators, type, src, off.result(), data.result());
2329 }
2330 
2331 void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
2332   BasicType type = x->basic_type();
2333   LIRItem src(x->object(), this);
2334   LIRItem off(x->offset(), this);
2335   LIRItem value(x->value(), this);
2336 
2337   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST;
2338 
2339   if (is_reference_type(type)) {
2340     decorators |= ON_UNKNOWN_OOP_REF;
2341   }
2342 
2343   LIR_Opr result;
2344   if (x->is_add()) {
2345     result = access_atomic_add_at(decorators, type, src, off, value);
2346   } else {
2347     result = access_atomic_xchg_at(decorators, type, src, off, value);
2348   }
2349   set_result(x, result);
2350 }
2351 
2352 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2353   int lng = x->length();
2354 
2355   for (int i = 0; i < lng; i++) {
2356     C1SwitchRange* one_range = x->at(i);
2357     int low_key = one_range->low_key();
2358     int high_key = one_range->high_key();
2359     BlockBegin* dest = one_range->sux();
2360     if (low_key == high_key) {
2361       __ cmp(lir_cond_equal, value, low_key);
2362       __ branch(lir_cond_equal, dest);
2363     } else if (high_key - low_key == 1) {
2364       __ cmp(lir_cond_equal, value, low_key);
2365       __ branch(lir_cond_equal, dest);
2366       __ cmp(lir_cond_equal, value, high_key);
2367       __ branch(lir_cond_equal, dest);
2368     } else {
2369       LabelObj* L = new LabelObj();
2370       __ cmp(lir_cond_less, value, low_key);
2371       __ branch(lir_cond_less, L->label());
2372       __ cmp(lir_cond_lessEqual, value, high_key);
2373       __ branch(lir_cond_lessEqual, dest);
2374       __ branch_destination(L->label());
2375     }
2376   }
2377   __ jump(default_sux);
2378 }
2379 
2380 
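     // For example, a tableswitch with lo_key 0 and successors {B1, B1, B1, B2}
     // yields the ranges [0,2] -> B1 and [3,3] -> B2. Ranges whose successor is
     // the default block are skipped where possible, since do_SwitchRanges()
     // ends with a jump to the default block anyway.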
2381 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2382   SwitchRangeList* res = new SwitchRangeList();
2383   int len = x->length();
2384   if (len > 0) {
2385     BlockBegin* sux = x->sux_at(0);
2386     int key = x->lo_key();
2387     BlockBegin* default_sux = x->default_sux();
2388     C1SwitchRange* range = new C1SwitchRange(key, sux);
2389     for (int i = 0; i < len; i++, key++) {
2390       BlockBegin* new_sux = x->sux_at(i);
2391       if (sux == new_sux) {
2392         // still in same range
2393         range->set_high_key(key);
2394       } else {
2395         // skip tests which explicitly dispatch to the default
2396         if (sux != default_sux) {
2397           res->append(range);
2398         }
2399         range = new C1SwitchRange(key, new_sux);
2400       }
2401       sux = new_sux;
2402     }
2403     if (res->length() == 0 || res->last() != range)  res->append(range);
2404   }
2405   return res;
2406 }
2407 
2408 
2409 // we expect the keys to be sorted by increasing value
2410 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2411   SwitchRangeList* res = new SwitchRangeList();
2412   int len = x->length();
2413   if (len > 0) {
2414     BlockBegin* default_sux = x->default_sux();
2415     int key = x->key_at(0);
2416     BlockBegin* sux = x->sux_at(0);
2417     C1SwitchRange* range = new C1SwitchRange(key, sux);
2418     for (int i = 1; i < len; i++) {
2419       int new_key = x->key_at(i);
2420       BlockBegin* new_sux = x->sux_at(i);
2421       if (key+1 == new_key && sux == new_sux) {
2422         // still in same range
2423         range->set_high_key(new_key);
2424       } else {
2425         // skip tests which explicitly dispatch to the default
2426         if (range->sux() != default_sux) {
2427           res->append(range);
2428         }
2429         range = new C1SwitchRange(new_key, new_sux);
2430       }
2431       key = new_key;
2432       sux = new_sux;
2433     }
2434     if (res->length() == 0 || res->last() != range)  res->append(range);
2435   }
2436   return res;
2437 }
2438 
2439 
2440 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2441   LIRItem tag(x->tag(), this);
2442   tag.load_item();
2443   set_no_result(x);
2444 
2445   if (x->is_safepoint()) {
2446     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2447   }
2448 
2449   // move values into phi locations
2450   move_to_phi(x->state());
2451 
2452   int lo_key = x->lo_key();
2453   int len = x->length();
2454   assert(lo_key <= (lo_key + (len - 1)), "integer overflow");
2455   LIR_Opr value = tag.result();
2456 
2457   if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2458     ciMethod* method = x->state()->scope()->method();
2459     ciMethodData* md = method->method_data_or_null();
2460     assert(md != NULL, "Sanity");
2461     ciProfileData* data = md->bci_to_data(x->state()->bci());
2462     assert(data != NULL, "must have profiling data");
2463     assert(data->is_MultiBranchData(), "bad profile data?");
2464     int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2465     LIR_Opr md_reg = new_register(T_METADATA);
2466     __ metadata2reg(md->constant_encoding(), md_reg);
2467     LIR_Opr data_offset_reg = new_pointer_register();
2468     LIR_Opr tmp_reg = new_pointer_register();
2469 
2470     __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2471     for (int i = 0; i < len; i++) {
2472       int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2473       __ cmp(lir_cond_equal, value, i + lo_key);
2474       __ move(data_offset_reg, tmp_reg);
2475       __ cmove(lir_cond_equal,
2476                LIR_OprFact::intptrConst(count_offset),
2477                tmp_reg,
2478                data_offset_reg, T_INT);
2479     }
2480 
2481     LIR_Opr data_reg = new_pointer_register();
2482     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2483     __ move(data_addr, data_reg);
2484     __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2485     __ move(data_reg, data_addr);
2486   }
2487 
2488   if (UseTableRanges) {
2489     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2490   } else {
2491     for (int i = 0; i < len; i++) {
2492       __ cmp(lir_cond_equal, value, i + lo_key);
2493       __ branch(lir_cond_equal, x->sux_at(i));
2494     }
2495     __ jump(x->default_sux());
2496   }
2497 }
2498 
2499 
2500 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2501   LIRItem tag(x->tag(), this);
2502   tag.load_item();
2503   set_no_result(x);
2504 
2505   if (x->is_safepoint()) {
2506     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2507   }
2508 
2509   // move values into phi locations
2510   move_to_phi(x->state());
2511 
2512   LIR_Opr value = tag.result();
2513   int len = x->length();
2514 
2515   if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2516     ciMethod* method = x->state()->scope()->method();
2517     ciMethodData* md = method->method_data_or_null();
2518     assert(md != NULL, "Sanity");
2519     ciProfileData* data = md->bci_to_data(x->state()->bci());
2520     assert(data != NULL, "must have profiling data");
2521     assert(data->is_MultiBranchData(), "bad profile data?");
2522     int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2523     LIR_Opr md_reg = new_register(T_METADATA);
2524     __ metadata2reg(md->constant_encoding(), md_reg);
2525     LIR_Opr data_offset_reg = new_pointer_register();
2526     LIR_Opr tmp_reg = new_pointer_register();
2527 
2528     __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2529     for (int i = 0; i < len; i++) {
2530       int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2531       __ cmp(lir_cond_equal, value, x->key_at(i));
2532       __ move(data_offset_reg, tmp_reg);
2533       __ cmove(lir_cond_equal,
2534                LIR_OprFact::intptrConst(count_offset),
2535                tmp_reg,
2536                data_offset_reg, T_INT);
2537     }
2538 
2539     LIR_Opr data_reg = new_pointer_register();
2540     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2541     __ move(data_addr, data_reg);
2542     __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2543     __ move(data_reg, data_addr);
2544   }
2545 
2546   if (UseTableRanges) {
2547     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2548   } else {
2550     for (int i = 0; i < len; i++) {
2551       __ cmp(lir_cond_equal, value, x->key_at(i));
2552       __ branch(lir_cond_equal, x->sux_at(i));
2553     }
2554     __ jump(x->default_sux());
2555   }
2556 }
2557 
2558 
2559 void LIRGenerator::do_Goto(Goto* x) {
2560   set_no_result(x);
2561 
2562   if (block()->next()->as_OsrEntry()) {
2563     // need to free up storage used for OSR entry point
2564     LIR_Opr osrBuffer = block()->next()->operand();
2565     BasicTypeList signature;
2566     signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2567     CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2568     __ move(osrBuffer, cc->args()->at(0));
2569     __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2570                          getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2571   }
2572 
2573   if (x->is_safepoint()) {
2574     ValueStack* state = x->state_before() ? x->state_before() : x->state();
2575 
2576     // increment backedge counter if needed
2577     CodeEmitInfo* info = state_for(x, state);
2578     increment_backedge_counter(info, x->profiled_bci());
2579     CodeEmitInfo* safepoint_info = state_for(x, state);
2580     __ safepoint(safepoint_poll_register(), safepoint_info);
2581   }
2582 
2583   // Gotos can be folded Ifs; handle this case.
2584   if (x->should_profile()) {
2585     ciMethod* method = x->profiled_method();
2586     assert(method != NULL, "method should be set if branch is profiled");
2587     ciMethodData* md = method->method_data_or_null();
2588     assert(md != NULL, "Sanity");
2589     ciProfileData* data = md->bci_to_data(x->profiled_bci());
2590     assert(data != NULL, "must have profiling data");
2591     int offset;
2592     if (x->direction() == Goto::taken) {
2593       assert(data->is_BranchData(), "need BranchData for two-way branches");
2594       offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2595     } else if (x->direction() == Goto::not_taken) {
2596       assert(data->is_BranchData(), "need BranchData for two-way branches");
2597       offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2598     } else {
2599       assert(data->is_JumpData(), "need JumpData for branches");
2600       offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2601     }
2602     LIR_Opr md_reg = new_register(T_METADATA);
2603     __ metadata2reg(md->constant_encoding(), md_reg);
2604 
2605     increment_counter(new LIR_Address(md_reg, offset,
2606                                       NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2607   }
2608 
2609   // emit phi-instruction move after safepoint since this simplifies
2610   // describing the state at the safepoint.
2611   move_to_phi(x->state());
2612 
2613   __ jump(x->default_sux());
2614 }
2615 
2616 /**
2617  * Emit profiling code if needed for arguments, parameters, return value types
2618  *
2619  * @param md                    MDO the code will update at runtime
2620  * @param md_base_offset        common offset in the MDO for this profile and subsequent ones
2621  * @param md_offset             offset in the MDO (on top of md_base_offset) for this profile
2622  * @param profiled_k            current profile
2623  * @param obj                   IR node for the object to be profiled
2624  * @param mdp                   register to hold the pointer inside the MDO (md + md_base_offset).
2625  *                              Set once we find an update to make and use for next ones.
2626  * @param not_null              true if we know obj cannot be null
2627  * @param signature_at_call_k   signature at call for obj
2628  * @param callee_signature_k    signature of callee for obj
2629  *                              at call and callee signatures differ at method handle call
2630  * @return                      the only klass we know will ever be seen at this profile point
2631  */
2632 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2633                                     Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2634                                     ciKlass* callee_signature_k) {
2635   ciKlass* result = NULL;
2636   bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2637   bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2638   // known not to be null or null bit already set and already set to
2639   // unknown: nothing we can do to improve profiling
2640   if (!do_null && !do_update) {
2641     return result;
2642   }
2643 
2644   ciKlass* exact_klass = NULL;
2645   Compilation* comp = Compilation::current();
2646   if (do_update) {
2647     // try to find exact type, using CHA if possible, so that loading
2648     // the klass from the object can be avoided
2649     ciType* type = obj->exact_type();
2650     if (type == NULL) {
2651       type = obj->declared_type();
2652       type = comp->cha_exact_type(type);
2653     }
2654     assert(type == NULL || type->is_klass(), "type should be class");
2655     exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2656 
2657     do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2658   }
2659 
2660   if (!do_null && !do_update) {
2661     return result;
2662   }
2663 
2664   ciKlass* exact_signature_k = NULL;
2665   if (do_update) {
2666     // Is the type from the signature exact (the only one possible)?
2667     exact_signature_k = signature_at_call_k->exact_klass();
2668     if (exact_signature_k == NULL) {
2669       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2670     } else {
2671       result = exact_signature_k;
2672       // Known statically. No need to emit any code: prevent
2673       // LIR_Assembler::emit_profile_type() from emitting useless code
2674       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2675     }
2676     // exact_klass and exact_signature_k can be both non NULL but
2677     // different if exact_klass is loaded after the ciObject for
2678     // exact_signature_k is created.
2679     if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2680       // sometimes the type of the signature is better than the best type
2681       // the compiler has
2682       exact_klass = exact_signature_k;
2683     }
2684     if (callee_signature_k != NULL &&
2685         callee_signature_k != signature_at_call_k) {
2686       ciKlass* improved_klass = callee_signature_k->exact_klass();
2687       if (improved_klass == NULL) {
2688         improved_klass = comp->cha_exact_type(callee_signature_k);
2689       }
2690       if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2691         exact_klass = exact_signature_k;
2692       }
2693     }
2694     do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2695   }
2696 
2697   if (!do_null && !do_update) {
2698     return result;
2699   }
2700 
2701   if (mdp == LIR_OprFact::illegalOpr) {
2702     mdp = new_register(T_METADATA);
2703     __ metadata2reg(md->constant_encoding(), mdp);
2704     if (md_base_offset != 0) {
2705       LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2706       mdp = new_pointer_register();
2707       __ leal(LIR_OprFact::address(base_type_address), mdp);
2708     }
2709   }
2710   LIRItem value(obj, this);
2711   value.load_item();
2712   __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2713                   value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2714   return result;
2715 }
2716 
2717 // profile parameters on entry to the root of the compilation
2718 void LIRGenerator::profile_parameters(Base* x) {
2719   if (compilation()->profile_parameters()) {
2720     CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2721     ciMethodData* md = scope()->method()->method_data_or_null();
2722     assert(md != NULL, "Sanity");
2723 
2724     if (md->parameters_type_data() != NULL) {
2725       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2726       ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
2727       LIR_Opr mdp = LIR_OprFact::illegalOpr;
2728       for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2729         LIR_Opr src = args->at(i);
2730         assert(!src->is_illegal(), "check");
2731         BasicType t = src->type();
2732         if (is_reference_type(t)) {
2733           intptr_t profiled_k = parameters->type(j);
2734           Local* local = x->state()->local_at(java_index)->as_Local();
2735           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2736                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2737                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2738           // If the profile is known statically set it once for all and do not emit any code
2739           if (exact != NULL) {
2740             md->set_parameter_type(j, exact);
2741           }
2742           j++;
2743         }
2744         java_index += type2size[t];
2745       }
2746     }
2747   }
2748 }
2749 
2750 void LIRGenerator::do_Base(Base* x) {
2751   __ std_entry(LIR_OprFact::illegalOpr);
2752   // Emit moves from physical registers / stack slots to virtual registers
2753   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2754   IRScope* irScope = compilation()->hir()->top_scope();
2755   int java_index = 0;
2756   for (int i = 0; i < args->length(); i++) {
2757     LIR_Opr src = args->at(i);
2758     assert(!src->is_illegal(), "check");
2759     BasicType t = src->type();
2760 
2761     // Types smaller than int are passed as int, so
2762     // correct the type that was passed.
2763     switch (t) {
2764     case T_BYTE:
2765     case T_BOOLEAN:
2766     case T_SHORT:
2767     case T_CHAR:
2768       t = T_INT;
2769       break;
2770     default:
2771       break;
2772     }
2773 
2774     LIR_Opr dest = new_register(t);
2775     __ move(src, dest);
2776 
2777     // Assign new location to Local instruction for this local
2778     Local* local = x->state()->local_at(java_index)->as_Local();
2779     assert(local != NULL, "Locals for incoming arguments must have been created");
2780 #ifndef __SOFTFP__
2781     // The java calling convention passes double as long and float as int.
2782     assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2783 #endif // __SOFTFP__
2784     local->set_operand(dest);
2785     _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2786     java_index += type2size[t];
2787   }
2788 
2789   if (compilation()->env()->dtrace_method_probes()) {
2790     BasicTypeList signature;
2791     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
2792     signature.append(T_METADATA); // Method*
2793     LIR_OprList* args = new LIR_OprList();
2794     args->append(getThreadPointer());
2795     LIR_Opr meth = new_register(T_METADATA);
2796     __ metadata2reg(method()->constant_encoding(), meth);
2797     args->append(meth);
2798     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2799   }
2800 
2801   if (method()->is_synchronized()) {
2802     LIR_Opr obj;
2803     if (method()->is_static()) {
2804       obj = new_register(T_OBJECT);
2805       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2806     } else {
2807       Local* receiver = x->state()->local_at(0)->as_Local();
2808       assert(receiver != NULL, "must already exist");
2809       obj = receiver->operand();
2810     }
2811     assert(obj->is_valid(), "must be valid");
2812 
2813     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2814       LIR_Opr lock = syncLockOpr();
2815       __ load_stack_address_monitor(0, lock);
2816 
2817       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2818       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2819 
2820       // receiver is guaranteed non-NULL, so we don't need a CodeEmitInfo
2821       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2822     }
2823   }
2824   if (compilation()->age_code()) {
2825     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
2826     decrement_age(info);
2827   }
2828   // increment invocation counters if needed
2829   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2830     profile_parameters(x);
2831     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2832     increment_invocation_counter(info);
2833   }
2834 
2835   // all blocks with a successor must end with an unconditional jump
2836   // to the successor even if they are consecutive
2837   __ jump(x->default_sux());
2838 }
2839 
2840 
2841 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2842   // construct our frame and model the production of the incoming pointer
2843   // to the OSR buffer.
2844   __ osr_entry(LIR_Assembler::osrBufferPointer());
2845   LIR_Opr result = rlock_result(x);
2846   __ move(LIR_Assembler::osrBufferPointer(), result);
2847 }
2848 
2849 
2850 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2851   assert(args->length() == arg_list->length(),
2852          "args=%d, arg_list=%d", args->length(), arg_list->length());
2853   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2854     LIRItem* param = args->at(i);
2855     LIR_Opr loc = arg_list->at(i);
2856     if (loc->is_register()) {
2857       param->load_item_force(loc);
2858     } else {
2859       LIR_Address* addr = loc->as_address_ptr();
2860       param->load_for_store(addr->type());
2861       if (addr->type() == T_OBJECT) {
2862         __ move_wide(param->result(), addr);
2863       } else if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2864         __ unaligned_move(param->result(), addr);
2865       } else {
2866         __ move(param->result(), addr);
2867       }
2869     }
2870   }
2871 
2872   if (x->has_receiver()) {
2873     LIRItem* receiver = args->at(0);
2874     LIR_Opr loc = arg_list->at(0);
2875     if (loc->is_register()) {
2876       receiver->load_item_force(loc);
2877     } else {
2878       assert(loc->is_address(), "just checking");
2879       receiver->load_for_store(T_OBJECT);
2880       __ move_wide(receiver->result(), loc->as_address_ptr());
2881     }
2882   }
2883 }
2884 
2885 
2886 // Visits all arguments, returns appropriate items without loading them
2887 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2888   LIRItemList* argument_items = new LIRItemList();
2889   if (x->has_receiver()) {
2890     LIRItem* receiver = new LIRItem(x->receiver(), this);
2891     argument_items->append(receiver);
2892   }
2893   for (int i = 0; i < x->number_of_arguments(); i++) {
2894     LIRItem* param = new LIRItem(x->argument_at(i), this);
2895     argument_items->append(param);
2896   }
2897   return argument_items;
2898 }
2899 
2900 
2901 // An invoke with a receiver has the following phases:
2902 //   a) traverse and load/lock the receiver;
2903 //   b) traverse all arguments -> item-array (invoke_visit_arguments)
2904 //   c) push the receiver on the stack
2905 //   d) load each of the items and push it on the stack
2906 //   e) unlock the receiver
2907 //   f) move the receiver into the receiver-register %o0
2908 //   g) lock the result registers and emit the call operation
2909 //
2910 // Before issuing a call, we must spill-save all values on the stack
2911 // that are in caller-save registers. "spill-save" moves those values
2912 // either into a free callee-save register or spills them if no free
2913 // callee-save register is available.
2914 //
2915 // The problem is where to invoke spill-save:
2916 // - if invoked between e) and f), spill-save may allocate a
2917 //   callee-save register and clobber the receiver register
2918 //   before f) is executed
2919 // - if we rearrange f) to happen earlier (by loading %o0 sooner), it
2920 //   may destroy a value on the stack that is currently in %o0
2921 //   and is waiting to be spilled
2922 // - if we keep the receiver locked while doing spill-save,
2923 //   we cannot spill it, as it is spill-locked
2924 //
2925 void LIRGenerator::do_Invoke(Invoke* x) {
2926   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2927 
2928   LIR_OprList* arg_list = cc->args();
2929   LIRItemList* args = invoke_visit_arguments(x);
2930   LIR_Opr receiver = LIR_OprFact::illegalOpr;
2931 
2932   // setup result register
2933   LIR_Opr result_register = LIR_OprFact::illegalOpr;
2934   if (x->type() != voidType) {
2935     result_register = result_register_for(x->type());
2936   }
2937 
2938   CodeEmitInfo* info = state_for(x, x->state());
2939 
2940   invoke_load_arguments(x, args, arg_list);
2941 
2942   if (x->has_receiver()) {
2943     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2944     receiver = args->at(0)->result();
2945   }
2946 
2947   // emit invoke code
2948   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2949 
2950   // JSR 292
2951   // Preserve the SP over MethodHandle call sites, if needed.
2952   ciMethod* target = x->target();
2953   bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2954                                   target->is_method_handle_intrinsic() ||
2955                                   target->is_compiled_lambda_form());
2956   if (is_method_handle_invoke) {
2957     info->set_is_method_handle_invoke(true);
2958     if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2959       __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2960     }
2961   }
2962 
2963   switch (x->code()) {
2964     case Bytecodes::_invokestatic:
2965       __ call_static(target, result_register,
2966                      SharedRuntime::get_resolve_static_call_stub(),
2967                      arg_list, info);
2968       break;
2969     case Bytecodes::_invokespecial:
2970     case Bytecodes::_invokevirtual:
2971     case Bytecodes::_invokeinterface:
2972       // for a loaded and final (method or class) target we still produce an inline cache,
2973       // in order to be able to call in mixed mode
2974       if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {
2975         __ call_opt_virtual(target, receiver, result_register,
2976                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2977                             arg_list, info);
2978       } else {
2979         __ call_icvirtual(target, receiver, result_register,
2980                           SharedRuntime::get_resolve_virtual_call_stub(),
2981                           arg_list, info);
2982       }
2983       break;
2984     case Bytecodes::_invokedynamic: {
2985       __ call_dynamic(target, receiver, result_register,
2986                       SharedRuntime::get_resolve_static_call_stub(),
2987                       arg_list, info);
2988       break;
2989     }
2990     default:
2991       fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
2992       break;
2993   }
2994 
2995   // JSR 292
2996   // Restore the SP after MethodHandle call sites, if needed.
2997   if (is_method_handle_invoke
2998       && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2999     __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
3000   }
3001 
3002   if (result_register->is_valid()) {
3003     LIR_Opr result = rlock_result(x);
3004     __ move(result_register, result);
3005   }
3006 }
3007 
3008 
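// The raw bit-conversion intrinsics below (intBitsToFloat, floatToRawIntBits,
// longBitsToDouble, doubleToRawLongBits) are implemented as a store/reload
// through a spill slot: forcing the value to memory and reading it back with
// the destination type reinterprets the bits without any conversion. For
// example (illustration only), floatToRawIntBits(1.0f) yields 0x3f800000.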
3009 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3010   assert(x->number_of_arguments() == 1, "wrong type");
3011   LIRItem value       (x->argument_at(0), this);
3012   LIR_Opr reg = rlock_result(x);
3013   value.load_item();
3014   LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3015   __ move(tmp, reg);
3016 }
3017 
3018 
3020 // Code for:  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3021 void LIRGenerator::do_IfOp(IfOp* x) {
3022 #ifdef ASSERT
3023   {
3024     ValueTag xtag = x->x()->type()->tag();
3025     ValueTag ttag = x->tval()->type()->tag();
3026     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3027     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3028     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3029   }
3030 #endif
3031 
3032   LIRItem left(x->x(), this);
3033   LIRItem right(x->y(), this);
3034   left.load_item();
3035   if (can_inline_as_constant(right.value())) {
3036     right.dont_load_item();
3037   } else {
3038     right.load_item();
3039   }
3040 
3041   LIRItem t_val(x->tval(), this);
3042   LIRItem f_val(x->fval(), this);
3043   t_val.dont_load_item();
3044   f_val.dont_load_item();
3045   LIR_Opr reg = rlock_result(x);
3046 
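  // Rough sketch of the emitted LIR (illustration only) for
  // "a < b ? t : f" with result r:
  //   cmp   less, a, b
  //   cmove less, t, f, r
  // cmove can consume constants directly, which is why t_val and f_val
  // are deliberately not loaded into registers above.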
3047   __ cmp(lir_cond(x->cond()), left.result(), right.result());
3048   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3049 }
3050 
3051 #ifdef JFR_HAVE_INTRINSICS
3052 
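// A minimal sketch of what the intrinsic below computes (pseudo-code only;
// the field name is descriptive, not the real one):
//   jobject h = thread->jfr_event_writer_handle;
//   return h == NULL ? NULL : *h;   // resolve the handle to the oop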
3053 void LIRGenerator::do_getEventWriter(Intrinsic* x) {
3054   LabelObj* L_end = new LabelObj();
3055 
3056   // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
3057   // meaning of these two is mixed up (see JDK-8026837).
3058   LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(),
3059                                            in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR),
3060                                            T_ADDRESS);
3061   LIR_Opr result = rlock_result(x);
3062   __ move(LIR_OprFact::oopConst(NULL), result);
3063   LIR_Opr jobj = new_register(T_METADATA);
3064   __ move_wide(jobj_addr, jobj);
3065   __ cmp(lir_cond_equal, jobj, LIR_OprFact::metadataConst(0));
3066   __ branch(lir_cond_equal, L_end->label());
3067 
3068   access_load(IN_NATIVE, T_OBJECT, LIR_OprFact::address(new LIR_Address(jobj, T_OBJECT)), result);
3069 
3070   __ branch_destination(L_end->label());
3071 }
3072 
3073 #endif
3074 
3075 
3076 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
3077   assert(x->number_of_arguments() == 0, "wrong type");
3078   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
3079   BasicTypeList signature;
3080   CallingConvention* cc = frame_map()->c_calling_convention(&signature);
3081   LIR_Opr reg = result_register_for(x->type());
3082   __ call_runtime_leaf(routine, getThreadTemp(),
3083                        reg, new LIR_OprList());
3084   LIR_Opr result = rlock_result(x);
3085   __ move(reg, result);
3086 }
3087 
3088 
3089 
3090 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3091   switch (x->id()) {
3092   case vmIntrinsics::_intBitsToFloat      :
3093   case vmIntrinsics::_doubleToRawLongBits :
3094   case vmIntrinsics::_longBitsToDouble    :
3095   case vmIntrinsics::_floatToRawIntBits   : {
3096     do_FPIntrinsics(x);
3097     break;
3098   }
3099 
3100 #ifdef JFR_HAVE_INTRINSICS
3101   case vmIntrinsics::_getEventWriter:
3102     do_getEventWriter(x);
3103     break;
3104   case vmIntrinsics::_counterTime:
3105     do_RuntimeCall(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), x);
3106     break;
3107 #endif
3108 
3109   case vmIntrinsics::_currentTimeMillis:
3110     do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), x);
3111     break;
3112 
3113   case vmIntrinsics::_nanoTime:
3114     do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), x);
3115     break;
3116 
3117   case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
3118   case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
3119   case vmIntrinsics::_isPrimitive:    do_isPrimitive(x);   break;
3120   case vmIntrinsics::_getModifiers:   do_getModifiers(x);  break;
3121   case vmIntrinsics::_getClass:       do_getClass(x);      break;
3122   case vmIntrinsics::_currentThread:  do_currentThread(x); break;
3123   case vmIntrinsics::_getObjectSize:  do_getObjectSize(x); break;
3124 
3125   case vmIntrinsics::_dlog:           // fall through
3126   case vmIntrinsics::_dlog10:         // fall through
3127   case vmIntrinsics::_dabs:           // fall through
3128   case vmIntrinsics::_dsqrt:          // fall through
3129   case vmIntrinsics::_dtan:           // fall through
3130   case vmIntrinsics::_dsin :          // fall through
3131   case vmIntrinsics::_dcos :          // fall through
3132   case vmIntrinsics::_dexp :          // fall through
3133   case vmIntrinsics::_dpow :          do_MathIntrinsic(x); break;
3134   case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;
3135 
3136   case vmIntrinsics::_fmaD:           do_FmaIntrinsic(x); break;
3137   case vmIntrinsics::_fmaF:           do_FmaIntrinsic(x); break;
3138 
3139   // java.nio.Buffer.checkIndex
3140   case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;
3141 
3142   case vmIntrinsics::_compareAndSetReference:
3143     do_CompareAndSwap(x, objectType);
3144     break;
3145   case vmIntrinsics::_compareAndSetInt:
3146     do_CompareAndSwap(x, intType);
3147     break;
3148   case vmIntrinsics::_compareAndSetLong:
3149     do_CompareAndSwap(x, longType);
3150     break;
3151 
3152   case vmIntrinsics::_loadFence :
3153     __ membar_acquire();
3154     break;
3155   case vmIntrinsics::_storeFence:
3156     __ membar_release();
3157     break;
3158   case vmIntrinsics::_storeStoreFence:
3159     __ membar_storestore();
3160     break;
3161   case vmIntrinsics::_fullFence :
3162     __ membar();
3163     break;
3164   case vmIntrinsics::_onSpinWait:
3165     __ on_spin_wait();
3166     break;
3167   case vmIntrinsics::_Reference_get:
3168     do_Reference_get(x);
3169     break;
3170 
3171   case vmIntrinsics::_updateCRC32:
3172   case vmIntrinsics::_updateBytesCRC32:
3173   case vmIntrinsics::_updateByteBufferCRC32:
3174     do_update_CRC32(x);
3175     break;
3176 
3177   case vmIntrinsics::_updateBytesCRC32C:
3178   case vmIntrinsics::_updateDirectByteBufferCRC32C:
3179     do_update_CRC32C(x);
3180     break;
3181 
3182   case vmIntrinsics::_vectorizedMismatch:
3183     do_vectorizedMismatch(x);
3184     break;
3185 
3186   case vmIntrinsics::_blackhole:
3187     do_blackhole(x);
3188     break;
3189 
3190   default: ShouldNotReachHere(); break;
3191   }
3192 }
3193 
3194 void LIRGenerator::profile_arguments(ProfileCall* x) {
3195   if (compilation()->profile_arguments()) {
3196     int bci = x->bci_of_invoke();
3197     ciMethodData* md = x->method()->method_data_or_null();
3198     assert(md != NULL, "Sanity");
3199     ciProfileData* data = md->bci_to_data(bci);
3200     if (data != NULL) {
3201       if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3202           (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3203         ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3204         int base_offset = md->byte_offset_of_slot(data, extra);
3205         LIR_Opr mdp = LIR_OprFact::illegalOpr;
3206         ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3207 
3208         Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3209         int start = 0;
3210         int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3211         if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3212           // first argument is not profiled at call (method handle invoke)
3213           assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3214           start = 1;
3215         }
3216         ciSignature* callee_signature = x->callee()->signature();
3217         // method handle call to virtual method
3218         bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3219         ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
3220 
3221         bool ignored_will_link;
3222         ciSignature* signature_at_call = NULL;
3223         x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3224         ciSignatureStream signature_at_call_stream(signature_at_call);
3225 
3226         // if called through method handle invoke, some arguments may have been popped
3227         for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3228           int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3229           ciKlass* exact = profile_type(md, base_offset, off,
3230               args->type(i), x->profiled_arg_at(i+start), mdp,
3231               !x->arg_needs_null_check(i+start),
3232               signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3233           if (exact != NULL) {
3234             md->set_argument_type(bci, i, exact);
3235           }
3236         }
3237       } else {
3238 #ifdef ASSERT
3239         Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3240         int n = x->nb_profiled_args();
3241         assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3242             (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3243             "only at JSR292 bytecodes");
3244 #endif
3245       }
3246     }
3247   }
3248 }
3249 
3250 // profile parameters on entry to an inlined method
3251 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3252   if (compilation()->profile_parameters() && x->inlined()) {
3253     ciMethodData* md = x->callee()->method_data_or_null();
3254     if (md != NULL) {
3255       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3256       if (parameters_type_data != NULL) {
3257         ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
3258         LIR_Opr mdp = LIR_OprFact::illegalOpr;
3259         bool has_receiver = !x->callee()->is_static();
3260         ciSignature* sig = x->callee()->signature();
3261         ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
3262         int i = 0; // used to iterate over the Instructions
3263         Value arg = x->recv();
3264         bool not_null = false;
3265         int bci = x->bci_of_invoke();
3266         Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3267         // The first parameter is the receiver, so that's what we start
3268         // with if it exists. One exception is a method handle call to a
3269         // virtual method: there the receiver is in the args list.
3270         if (arg == NULL || !Bytecodes::has_receiver(bc)) {
3271           i = 1;
3272           arg = x->profiled_arg_at(0);
3273           not_null = !x->arg_needs_null_check(0);
3274         }
3275         int k = 0; // used to iterate over the profile data
3276         for (;;) {
3277           intptr_t profiled_k = parameters->type(k);
3278           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3279                                         in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3280                                         profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
3281           // If the profile is known statically, set it once and for all and do not emit any code
3282           if (exact != NULL) {
3283             md->set_parameter_type(k, exact);
3284           }
3285           k++;
3286           if (k >= parameters_type_data->number_of_parameters()) {
3287 #ifdef ASSERT
3288             int extra = 0;
3289             if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3290                 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3291                 x->recv() != NULL && Bytecodes::has_receiver(bc)) {
3292               extra += 1;
3293             }
3294             assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3295 #endif
3296             break;
3297           }
3298           arg = x->profiled_arg_at(i);
3299           not_null = !x->arg_needs_null_check(i);
3300           i++;
3301         }
3302       }
3303     }
3304   }
3305 }
3306 
3307 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3308   // Need recv in a temporary register so that it interferes with the other temporaries
3309   LIR_Opr recv = LIR_OprFact::illegalOpr;
3310   LIR_Opr mdo = new_register(T_METADATA);
3311   // tmp is used to hold the counters on SPARC
3312   LIR_Opr tmp = new_pointer_register();
3313 
3314   if (x->nb_profiled_args() > 0) {
3315     profile_arguments(x);
3316   }
3317 
3318   // profile parameters on inlined method entry including receiver
3319   if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3320     profile_parameters_at_call(x);
3321   }
3322 
3323   if (x->recv() != NULL) {
3324     LIRItem value(x->recv(), this);
3325     value.load_item();
3326     recv = new_register(T_OBJECT);
3327     __ move(value.result(), recv);
3328   }
3329   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3330 }
3331 
3332 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3333   int bci = x->bci_of_invoke();
3334   ciMethodData* md = x->method()->method_data_or_null();
3335   assert(md != NULL, "Sanity");
3336   ciProfileData* data = md->bci_to_data(bci);
3337   if (data != NULL) {
3338     assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3339     ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3340     LIR_Opr mdp = LIR_OprFact::illegalOpr;
3341 
3342     bool ignored_will_link;
3343     ciSignature* signature_at_call = NULL;
3344     x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3345 
3346     // The offset within the MDO of the entry to update may be too large
3347     // to be used in load/store instructions on some platforms. So have
3348     // profile_type() compute the address of the profile in a register.
3349     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3350         ret->type(), x->ret(), mdp,
3351         !x->needs_null_check(),
3352         signature_at_call->return_type()->as_klass(),
3353         x->callee()->signature()->return_type()->as_klass());
3354     if (exact != NULL) {
3355       md->set_return_type(bci, exact);
3356     }
3357   }
3358 }
3359 
3360 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3361   // We can safely ignore accessors here, since c2 will inline them anyway;
3362   // accessors are also always mature.
3363   if (!x->inlinee()->is_accessor()) {
3364     CodeEmitInfo* info = state_for(x, x->state(), true);
3365     // Notify the runtime only very infrequently, just to take care of counter overflows
3366     int freq_log = Tier23InlineeNotifyFreqLog;
3367     double scale;
3368     if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
3369       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3370     }
3371     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3372   }
3373 }
3374 
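// Used for an If whose successors may be loop backedges. The cmove below
// selects a step of count_increment for whichever branch target lies
// backwards (target bci < current bci) and zero otherwise, so only a taken
// backward branch bumps the backedge counter. Sketch (illustration only):
//   step = cond(left, right) ? (left_bci < bci ? increment : 0)
//                            : (right_bci < bci ? increment : 0)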
3375 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3376   if (compilation()->count_backedges()) {
3377 #if defined(X86) && !defined(_LP64)
3378     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3379     LIR_Opr left_copy = new_register(left->type());
3380     __ move(left, left_copy);
3381     __ cmp(cond, left_copy, right);
3382 #else
3383     __ cmp(cond, left, right);
3384 #endif
3385     LIR_Opr step = new_register(T_INT);
3386     LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment);
3387     LIR_Opr zero = LIR_OprFact::intConst(0);
3388     __ cmove(cond,
3389         (left_bci < bci) ? plus_one : zero,
3390         (right_bci < bci) ? plus_one : zero,
3391         step, left->type());
3392     increment_backedge_counter(info, step, bci);
3393   }
3394 }
3395 
3396 
3397 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge) {
3398   int freq_log = 0;
3399   int level = compilation()->env()->comp_level();
3400   if (level == CompLevel_limited_profile) {
3401     freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3402   } else if (level == CompLevel_full_profile) {
3403     freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3404   } else {
3405     ShouldNotReachHere();
3406   }
3407   // Increment the appropriate invocation/backedge counter and notify the runtime.
3408   double scale;
3409   if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
3410     freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3411   }
3412   increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
3413 }
3414 
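// Code aging: decrement the nmethod_age counter in the MethodCounters on
// every entry; once it reaches zero, the DeoptimizeStub below triggers a
// Reason_tenured deoptimization that makes this code not entrant. Roughly
// (illustration only):
//   if (--counters->nmethod_age <= 0) deoptimize(Reason_tenured);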
3415 void LIRGenerator::decrement_age(CodeEmitInfo* info) {
3416   ciMethod* method = info->scope()->method();
3417   MethodCounters* mc_adr = method->ensure_method_counters();
3418   if (mc_adr != NULL) {
3419     LIR_Opr mc = new_pointer_register();
3420     __ move(LIR_OprFact::intptrConst(mc_adr), mc);
3421     int offset = in_bytes(MethodCounters::nmethod_age_offset());
3422     LIR_Address* counter = new LIR_Address(mc, offset, T_INT);
3423     LIR_Opr result = new_register(T_INT);
3424     __ load(counter, result);
3425     __ sub(result, LIR_OprFact::intConst(1), result);
3426     __ store(result, counter);
3427     // DeoptimizeStub will reexecute from the current state in code info.
3428     CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
3429                                          Deoptimization::Action_make_not_entrant);
3430     __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
3431     __ branch(lir_cond_lessEqual, deopt);
3432   }
3433 }
3434 
3435 
3436 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3437                                                 ciMethod *method, LIR_Opr step, int frequency,
3438                                                 int bci, bool backedge, bool notify) {
3439   assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
3440   int level = _compilation->env()->comp_level();
3441   assert(level > CompLevel_simple, "Shouldn't be here");
3442 
3443   int offset = -1;
3444   LIR_Opr counter_holder = NULL;
3445   if (level == CompLevel_limited_profile) {
3446     MethodCounters* counters_adr = method->ensure_method_counters();
3447     if (counters_adr == NULL) {
3448       bailout("method counters allocation failed");
3449       return;
3450     }
3451     counter_holder = new_pointer_register();
3452     __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3453     offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3454                                  MethodCounters::invocation_counter_offset());
3455   } else if (level == CompLevel_full_profile) {
3456     counter_holder = new_register(T_METADATA);
3457     offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3458                                  MethodData::invocation_counter_offset());
3459     ciMethodData* md = method->method_data_or_null();
3460     assert(md != NULL, "Sanity");
3461     __ metadata2reg(md->constant_encoding(), counter_holder);
3462   } else {
3463     ShouldNotReachHere();
3464   }
3465   LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3466   LIR_Opr result = new_register(T_INT);
3467   __ load(counter, result);
3468   __ add(result, step, result);
3469   __ store(result, counter);
3470   if (notify && (!backedge || UseOnStackReplacement)) {
3471     LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
3472     // For ifs, the bci in the info can point to the cmp; we want the bci of the if itself
3473     CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3474     int freq = frequency << InvocationCounter::count_shift;
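    // With frequency == 2^n - 1, the mask test below fires once every 2^n
    // increments: e.g. (illustration only) freq_log == 10 gives
    // frequency == 1023, and (counter & (1023 << count_shift)) == 0 exactly
    // when the 10 counter bits just above count_shift are all zero.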
3475     if (freq == 0) {
3476       if (!step->is_constant()) {
3477         __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3478         __ branch(lir_cond_notEqual, overflow);
3479       } else {
3480         __ branch(lir_cond_always, overflow);
3481       }
3482     } else {
3483       LIR_Opr mask = load_immediate(freq, T_INT);
3484       if (!step->is_constant()) {
3485         // If step is 0, make sure the overflow check below always fails
3486         __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3487         __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT);
3488       }
3489       __ logical_and(result, mask, result);
3490       __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3491       __ branch(lir_cond_equal, overflow);
3492     }
3493     __ branch_destination(overflow->continuation());
3494   }
3495 }
3496 
3497 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3498   LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3499   BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3500 
3501   if (x->pass_thread()) {
3502     signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
3503     args->append(getThreadPointer());
3504   }
3505 
3506   for (int i = 0; i < x->number_of_arguments(); i++) {
3507     Value a = x->argument_at(i);
3508     LIRItem* item = new LIRItem(a, this);
3509     item->load_item();
3510     args->append(item->result());
3511     signature->append(as_BasicType(a->type()));
3512   }
3513 
3514   LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3515   if (x->type() == voidType) {
3516     set_no_result(x);
3517   } else {
3518     __ move(result, rlock_result(x));
3519   }
3520 }
3521 
3522 #ifdef ASSERT
3523 void LIRGenerator::do_Assert(Assert *x) {
3524   ValueTag tag = x->x()->type()->tag();
3525   If::Condition cond = x->cond();
3526 
3527   LIRItem xitem(x->x(), this);
3528   LIRItem yitem(x->y(), this);
3529   LIRItem* xin = &xitem;
3530   LIRItem* yin = &yitem;
3531 
3532   assert(tag == intTag, "Only integer assertions are valid!");
3533 
3534   xin->load_item();
3535   yin->dont_load_item();
3536 
3537   set_no_result(x);
3538 
3539   LIR_Opr left = xin->result();
3540   LIR_Opr right = yin->result();
3541 
3542   __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3543 }
3544 #endif
3545 
3546 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3549   Instruction *a = x->x();
3550   Instruction *b = x->y();
3551   if (!a || StressRangeCheckElimination) {
3552     assert(!b || StressRangeCheckElimination, "B must also be null");
3553 
3554     CodeEmitInfo *info = state_for(x, x->state());
3555     CodeStub* stub = new PredicateFailedStub(info);
3556 
3557     __ jump(stub);
3558   } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3559     int a_int = a->type()->as_IntConstant()->value();
3560     int b_int = b->type()->as_IntConstant()->value();
3561 
3562     bool ok = false;
3563 
3564     switch(x->cond()) {
3565       case Instruction::eql: ok = (a_int == b_int); break;
3566       case Instruction::neq: ok = (a_int != b_int); break;
3567       case Instruction::lss: ok = (a_int < b_int); break;
3568       case Instruction::leq: ok = (a_int <= b_int); break;
3569       case Instruction::gtr: ok = (a_int > b_int); break;
3570       case Instruction::geq: ok = (a_int >= b_int); break;
3571       case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3572       case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3573       default: ShouldNotReachHere();
3574     }
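    // aeq/beq are the unsigned variants: e.g. (illustration only) with
    // a_int == -1 and b_int == 1, aeq compares 0xFFFFFFFF >= 1 unsigned
    // and is therefore true.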
3575 
3576     if (ok) {
3578       CodeEmitInfo *info = state_for(x, x->state());
3579       CodeStub* stub = new PredicateFailedStub(info);
3580 
3581       __ jump(stub);
3582     }
3583   } else {
3585     ValueTag tag = x->x()->type()->tag();
3586     If::Condition cond = x->cond();
3587     LIRItem xitem(x->x(), this);
3588     LIRItem yitem(x->y(), this);
3589     LIRItem* xin = &xitem;
3590     LIRItem* yin = &yitem;
3591 
3592     assert(tag == intTag, "Only integer deoptimizations are valid!");
3593 
3594     xin->load_item();
3595     yin->dont_load_item();
3596     set_no_result(x);
3597 
3598     LIR_Opr left = xin->result();
3599     LIR_Opr right = yin->result();
3600 
3601     CodeEmitInfo *info = state_for(x, x->state());
3602     CodeStub* stub = new PredicateFailedStub(info);
3603 
3604     __ cmp(lir_cond(cond), left, right);
3605     __ branch(lir_cond(cond), stub);
3606   }
3607 }
3608 
3609 void LIRGenerator::do_blackhole(Intrinsic *x) {
3610   assert(!x->has_receiver(), "Should have been checked before: only static methods here");
3611   for (int c = 0; c < x->number_of_arguments(); c++) {
3612     // Load the argument
3613     LIRItem vitem(x->argument_at(c), this);
3614     vitem.load_item();
3615     // ...and leave it unused.
3616   }
3617 }
3618 
3619 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3620   LIRItemList args(1);
3621   LIRItem value(arg1, this);
3622   args.append(&value);
3623   BasicTypeList signature;
3624   signature.append(as_BasicType(arg1->type()));
3625 
3626   return call_runtime(&signature, &args, entry, result_type, info);
3627 }
3628 
3629 
3630 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3631   LIRItemList args(2);
3632   LIRItem value1(arg1, this);
3633   LIRItem value2(arg2, this);
3634   args.append(&value1);
3635   args.append(&value2);
3636   BasicTypeList signature;
3637   signature.append(as_BasicType(arg1->type()));
3638   signature.append(as_BasicType(arg2->type()));
3639 
3640   return call_runtime(&signature, &args, entry, result_type, info);
3641 }
3642 
3643 
3644 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3645                                    address entry, ValueType* result_type, CodeEmitInfo* info) {
3646   // get a result register
3647   LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3648   LIR_Opr result = LIR_OprFact::illegalOpr;
3649   if (result_type->tag() != voidTag) {
3650     result = new_register(result_type);
3651     phys_reg = result_register_for(result_type);
3652   }
3653 
3654   // move the arguments into the correct location
3655   CallingConvention* cc = frame_map()->c_calling_convention(signature);
3656   assert(cc->length() == args->length(), "argument mismatch");
3657   for (int i = 0; i < args->length(); i++) {
3658     LIR_Opr arg = args->at(i);
3659     LIR_Opr loc = cc->at(i);
3660     if (loc->is_register()) {
3661       __ move(arg, loc);
3662     } else {
3663       LIR_Address* addr = loc->as_address_ptr();
3664       // if (!can_store_as_constant(arg)) {
3665       //   LIR_Opr tmp = new_register(arg->type());
3666       //   __ move(arg, tmp);
3667       //   arg = tmp;
3668       // }
3669       if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3670         __ unaligned_move(arg, addr);
3671       } else {
3672         __ move(arg, addr);
3673       }
3674     }
3675   }
3676 
3677   if (info) {
3678     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3679   } else {
3680     __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3681   }
3682   if (result->is_valid()) {
3683     __ move(phys_reg, result);
3684   }
3685   return result;
3686 }
3687 
3688 
3689 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3690                                    address entry, ValueType* result_type, CodeEmitInfo* info) {
3691   // get a result register
3692   LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3693   LIR_Opr result = LIR_OprFact::illegalOpr;
3694   if (result_type->tag() != voidTag) {
3695     result = new_register(result_type);
3696     phys_reg = result_register_for(result_type);
3697   }
3698 
3699   // move the arguments into the correct location
3700   CallingConvention* cc = frame_map()->c_calling_convention(signature);
3701 
3702   assert(cc->length() == args->length(), "argument mismatch");
3703   for (int i = 0; i < args->length(); i++) {
3704     LIRItem* arg = args->at(i);
3705     LIR_Opr loc = cc->at(i);
3706     if (loc->is_register()) {
3707       arg->load_item_force(loc);
3708     } else {
3709       LIR_Address* addr = loc->as_address_ptr();
3710       arg->load_for_store(addr->type());
3711       if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3712         __ unaligned_move(arg->result(), addr);
3713       } else {
3714         __ move(arg->result(), addr);
3715       }
3716     }
3717   }
3718 
3719   if (info) {
3720     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3721   } else {
3722     __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3723   }
3724   if (result->is_valid()) {
3725     __ move(phys_reg, result);
3726   }
3727   return result;
3728 }
3729 
3730 void LIRGenerator::do_MemBar(MemBar* x) {
3731   LIR_Code code = x->code();
3732   switch(code) {
3733   case lir_membar_acquire   : __ membar_acquire(); break;
3734   case lir_membar_release   : __ membar_release(); break;
3735   case lir_membar           : __ membar(); break;
3736   case lir_membar_loadload  : __ membar_loadload(); break;
3737   case lir_membar_storestore: __ membar_storestore(); break;
3738   case lir_membar_loadstore : __ membar_loadstore(); break;
3739   case lir_membar_storeload : __ membar_storeload(); break;
3740   default                   : ShouldNotReachHere(); break;
3741   }
3742 }
3743 
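// T_BOOLEAN and T_BYTE array stores share the same bytecode-level typing, so
// a bastore that is statically typed as byte[] may dynamically hit a
// boolean[]; in that case the stored value must be masked to 0 or 1. The
// layout-helper diffbit distinguishes the two array kinds at runtime.
// Sketch (illustration only):
//   masked = value & 1;
//   stored = is_boolean_array(array) ? masked : value;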
3744 LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3745   LIR_Opr value_fixed = rlock_byte(T_BYTE);
3746   if (TwoOperandLIRForm) {
3747     __ move(value, value_fixed);
3748     __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
3749   } else {
3750     __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
3751   }
3752   LIR_Opr klass = new_register(T_METADATA);
3753   load_klass(array, klass, null_check_info);
3754   null_check_info = NULL;
3755   LIR_Opr layout = new_register(T_INT);
3756   __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
3757   int diffbit = Klass::layout_helper_boolean_diffbit();
3758   __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
3759   __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
3760   __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
3761   value = value_fixed;
3762   return value;
3763 }
3764 
3765 LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3766   if (x->check_boolean()) {
3767     value = mask_boolean(array, value, null_check_info);
3768   }
3769   return value;
3770 }