/*
 * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

#ifndef PATCHED_ADDR
#define PATCHED_ADDR  (max_jint)
#endif

void PhiResolverState::reset() {
  _virtual_operands.clear();
  _other_operands.clear();
  _vreg_table.clear();
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
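//
// Illustrative note (not from the original comment): such cycles typically
// arise when two phi values are swapped along a loop back-edge, e.g. from
// Java code like
//   int t = a; a = b; b = t;
// inside a loop, which leaves the parallel moves a := b, b := a at the edge.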

PhiResolver::PhiResolver(LIRGenerator* gen)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset();
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}

// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e. for two assignments b := c, a := b, start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. for a cycle a := b, b := a, start with node a:
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands().at(i);
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands().at(i);
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table().at_put(vreg_num, node);
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when encountering a root
  if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((!CompilerConfig::is_c1_only_no_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                    CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, stub); // forward branch
  }
  __ move(index, result);
}



void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:  __ mul(left_op, right_op, result_op, tmp_op); break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          jint c = right->as_jint();
          if (c > 0 && is_power_of_2(c)) {
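            // illustrative: e.g. x * 8 is strength-reduced to x << 3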
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv: __ div(left_op, right_op, result_op, tmp_op); break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {

  if (TwoOperandLIRForm && value != result_op
      // Only 32bit right shifts require two operand form on S390.
      S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
    tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() > 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

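// Illustrative example (not from the original source): for a call like
//   System.arraycopy(src, 0, dst, 0, src.length)
// the analysis below can elide the src null check, the src range check and
// the length-positive check, because the length is the source array's own
// length and the source position is the constant zero.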
void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which end up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dst positions are the same, or dst_pos is zero, so assume a
    // non-overlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (strict_fp_requires_explicit_rounding) {
#ifdef IA32
    if (UseSSE < 1 && opr->is_single_fpu()) {
      LIR_Opr result = new_register(T_FLOAT);
      set_vreg_flag(result, must_start_in_memory);
      assert(opr->is_register(), "only a register can be spilled");
      assert(opr->value_type()->is_float(), "rounding only for floats available");
      __ roundfp(opr, LIR_OprFact::illegalOpr, result);
      return result;
    }
#else
    Unimplemented();
#endif // IA32
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double, ...) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At the exit of a basic block, we fill the register(s) and spill slots.
// At the entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks the necessary registers and spill slots.
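//
// Illustrative example (assumed, not from the original comment): for Java
// code like
//   int x = cond ? y : z;
// the value of x at the merge block is a phi of y and z; each predecessor
// moves its value into the phi's designated register or spill slot before
// branching to the merge, which is what move_to_phi() below arranges.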


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    if (phi->is_local()) {
      for (int i = 0; i < phi->operand_count(); i++) {
        Value op = phi->operand_at(i);
        if (op != NULL && op->type()->is_illegal()) {
          bailout("illegal phi operand");
        }
      }
    }
    Phi* cur_phi = cur_val->as_Phi();
    if (cur_phi != NULL && cur_phi->is_illegal()) {
      // Phi and local would need to get invalidated
      // (which is unexpected for Linear Scan).
      // But this case is very rare so we simply bail out.
      bailout("propagation of illegal phi");
      return;
    }
    LIR_Opr operand = cur_val->operand();
    if (operand->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      PhiResolver resolver(this);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg_num = _virtual_register_number;
  // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
  // a few extra registers before we really run out, which helps us avoid tripping over assertions.
  if (vreg_num + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers in LIR generator");
    if (vreg_num + 2 >= LIR_OprDesc::vreg_max) {
      // Wrap it around and continue until bailout really happens to avoid hitting assertions.
      _virtual_register_number = LIR_OprDesc::vreg_base;
      vreg_num = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  LIR_Opr vreg = LIR_OprFact::virtual_register(vreg_num, type);
  assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
  return vreg;
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   if (!phi->is_illegal()) { operand_for_instruction(phi); });

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Opr result = rlock_result(x, T_OBJECT);
  access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
                 reference, LIR_OprFact::intConst(referent_offset), result);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

void LIRGenerator::load_klass(LIR_Opr obj, LIR_Opr klass, CodeEmitInfo* null_check_info) {
  CodeStub* slow_path = UseCompactObjectHeaders ? new LoadKlassStub(klass) : NULL;
  __ load_klass(obj, klass, null_check_info, slow_path);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_ADDRESS);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Opr klass = new_register(T_METADATA);
  load_klass(rcvr.result(), klass, info);
  __ move_wide(new LIR_Address(klass, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
  // mirror = ((OopHandle)mirror)->resolve();
  access_load(IN_NATIVE, T_OBJECT,
              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
}

// java.lang.Class::isPrimitive()
void LIRGenerator::do_isPrimitive(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset(), T_ADDRESS), temp, info);
  __ cmp(lir_cond_notEqual, temp, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
}

// Example: Foo.class.getModifiers()
void LIRGenerator::do_getModifiers(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem receiver(x->argument_at(0), this);
  receiver.load_item();
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // While reading off the universal constant mirror is less efficient than doing
  // another branch and returning the constant answer, this branchless code runs into
  // much less risk of confusing the C1 register allocator. The choice of the universe
  // object here is correct as long as it returns the same modifiers we would expect
  // from the primitive class itself. See the spec for Class.getModifiers, which gives
  // the typed array klasses similar modifiers to their component types.

  Klass* univ_klass_obj = Universe::byteArrayKlassObj();
  assert(univ_klass_obj->modifier_flags() == (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC), "Sanity");
  LIR_Opr prim_klass = LIR_OprFact::metadataConst(univ_klass_obj);

  LIR_Opr recv_klass = new_register(T_METADATA);
  __ move(new LIR_Address(receiver.result(), java_lang_Class::klass_offset(), T_ADDRESS), recv_klass, info);

  // Check if this is a Java mirror of primitive type, and select the appropriate klass.
  LIR_Opr klass = new_register(T_METADATA);
  __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);

  // Get the answer.
  __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), result);
}

// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr temp = new_register(T_ADDRESS);
  LIR_Opr reg = rlock_result(x);
  __ move(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_ADDRESS), temp);
  // threadObj = ((OopHandle)_threadObj)->resolve();
  access_load(IN_NATIVE, T_OBJECT,
              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), reg);
}

void LIRGenerator::do_getObjectSize(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIR_Opr result_reg = rlock_result(x);

  LIRItem value(x->argument_at(2), this);
  value.load_item();

  LIR_Opr klass = new_register(T_METADATA);
  load_klass(value.result(), klass, NULL);
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);

  LabelObj* L_done = new LabelObj();
  LabelObj* L_array = new LabelObj();

  __ cmp(lir_cond_lessEqual, layout, 0);
  __ branch(lir_cond_lessEqual, L_array->label());

  // Instance case: the layout helper gives us instance size almost directly,
  // but we need to mask out the _lh_instance_slow_path_bit.
  __ convert(Bytecodes::_i2l, layout, result_reg);

  assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
  jlong mask = ~(jlong) right_n_bits(LogBytesPerLong);
  __ logical_and(result_reg, LIR_OprFact::longConst(mask), result_reg);

  __ branch(lir_cond_always, L_done->label());

  // Array case: size is round(header + element_size*arraylength).
  // Since arraylength is different for every array instance, we have to
  // compute the whole thing at runtime.

  __ branch_destination(L_array->label());

  int round_mask = MinObjAlignmentInBytes - 1;

  // Figure out header sizes first.
  LIR_Opr hss = LIR_OprFact::intConst(Klass::_lh_header_size_shift);
  LIR_Opr hsm = LIR_OprFact::intConst(Klass::_lh_header_size_mask);

  LIR_Opr header_size = new_register(T_INT);
  __ move(layout, header_size);
  LIR_Opr tmp = new_register(T_INT);
  __ unsigned_shift_right(header_size, hss, header_size, tmp);
  __ logical_and(header_size, hsm, header_size);
  __ add(header_size, LIR_OprFact::intConst(round_mask), header_size);

  // Figure out the array length in bytes
  assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
  LIR_Opr l2esm = LIR_OprFact::intConst(Klass::_lh_log2_element_size_mask);
  __ logical_and(layout, l2esm, layout);

  LIR_Opr length_int = new_register(T_INT);
  __ move(new LIR_Address(value.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), length_int);

#ifdef _LP64
  LIR_Opr length = new_register(T_LONG);
  __ convert(Bytecodes::_i2l, length_int, length);
#endif

  // Shift-left awkwardness. Normally it is just:
  //   __ shift_left(length, layout, length);
  // But C1 cannot perform shift_left with non-constant count, so we end up
  // doing the per-bit loop dance here. x86_32 also does not know how to shift
  // longs, so we have to act on ints.
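  // (Illustrative: for a long[] the masked layout value is log2(8) = 3, so
  // the loop below shifts the length left three times, i.e. multiplies the
  // element count by 8 to get the byte size.)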
1417   LabelObj* L_shift_loop = new LabelObj();
1418   LabelObj* L_shift_exit = new LabelObj();
1419 
1420   __ branch_destination(L_shift_loop->label());
1421   __ cmp(lir_cond_equal, layout, 0);
1422   __ branch(lir_cond_equal, L_shift_exit->label());
1423 
1424 #ifdef _LP64
1425   __ shift_left(length, 1, length);
1426 #else
1427   __ shift_left(length_int, 1, length_int);
1428 #endif
1429 
1430   __ sub(layout, LIR_OprFact::intConst(1), layout);
1431 
1432   __ branch(lir_cond_always, L_shift_loop->label());
1433   __ branch_destination(L_shift_exit->label());
1434 
1435   // Mix all up, round, and push to the result.
1436 #ifdef _LP64
1437   LIR_Opr header_size_long = new_register(T_LONG);
1438   __ convert(Bytecodes::_i2l, header_size, header_size_long);
1439   __ add(length, header_size_long, length);
1440   if (round_mask != 0) {
1441     __ logical_and(length, LIR_OprFact::longConst(~round_mask), length);
1442   }
1443   __ move(length, result_reg);
1444 #else
1445   __ add(length_int, header_size, length_int);
1446   if (round_mask != 0) {
1447     __ logical_and(length_int, LIR_OprFact::intConst(~round_mask), length_int);
1448   }
1449   __ convert(Bytecodes::_i2l, length_int, result_reg);
1450 #endif
1451 
1452   __ branch_destination(L_done->label());
1453 }
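
// A plain-C++ sketch of what do_getObjectSize() computes above (illustrative
// only; it assumes the Klass::layout_helper() encoding implied by the
// constants referenced in the code and is not part of the emitted LIR):
//
//   jint lh = klass->layout_helper();
//   jlong size;
//   if (lh > 0) {                                   // instance
//     // lh is the instance size in bytes, with the low bits possibly
//     // carrying _lh_instance_slow_path_bit; mask them off.
//     size = lh & ~(jlong) right_n_bits(LogBytesPerLong);
//   } else {                                        // array
//     int hsize = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
//     int l2es  = lh & Klass::_lh_log2_element_size_mask;
//     // length is the array length; round_mask = MinObjAlignmentInBytes - 1
//     size = (hsize + ((jlong) length << l2es) + round_mask) & ~(jlong) round_mask;
//   }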
1454 
1455 void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
1456   assert(x->number_of_arguments() == 1, "wrong type");
1457   LIRItem receiver(x->argument_at(0), this);
1458 
1459   receiver.load_item();
1460   BasicTypeList signature;
1461   signature.append(T_OBJECT); // receiver
1462   LIR_OprList* args = new LIR_OprList();
1463   args->append(receiver.result());
1464   CodeEmitInfo* info = state_for(x, x->state());
1465   call_runtime(&signature, args,
1466                CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
1467                voidType, info);
1468 
1469   set_no_result(x);
1470 }
1471 
1472 
1473 //------------------------local access--------------------------------------
1474 
1475 LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1476   if (x->operand()->is_illegal()) {
1477     Constant* c = x->as_Constant();
1478     if (c != NULL) {
1479       x->set_operand(LIR_OprFact::value_type(c->type()));
1480     } else {
1481       assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
1482       // allocate a virtual register for this local or phi
1483       x->set_operand(rlock(x));
1484       _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
1485     }
1486   }
1487   return x->operand();
1488 }
1489 
1490 
1491 Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
1492   if (opr->is_virtual()) {
1493     return instruction_for_vreg(opr->vreg_number());
1494   }
1495   return NULL;
1496 }
1497 
1498 
1499 Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1500   if (reg_num < _instruction_for_operand.length()) {
1501     return _instruction_for_operand.at(reg_num);
1502   }
1503   return NULL;
1504 }
1505 
1506 
1507 void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
1508   if (_vreg_flags.size_in_bits() == 0) {
1509     BitMap2D temp(100, num_vreg_flags);
1510     _vreg_flags = temp;
1511   }
1512   _vreg_flags.at_put_grow(vreg_num, f, true);
1513 }
1514 
1515 bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1516   if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1517     return false;
1518   }
1519   return _vreg_flags.at(vreg_num, f);
1520 }
1521 
1522 
1523 // Block local constant handling.  This code is useful for keeping
1524 // unpinned constants and constants which aren't exposed in the IR in
1525 // registers.  Unpinned Constant instructions have their operands
1526 // cleared when the block is finished so that other blocks can't end
1527 // up referring to their registers.
1528 
1529 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1530   assert(!x->is_pinned(), "only for unpinned constants");
1531   _unpinned_constants.append(x);
1532   return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1533 }
1534 
1535 
1536 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1537   BasicType t = c->type();
1538   for (int i = 0; i < _constants.length(); i++) {
1539     LIR_Const* other = _constants.at(i);
1540     if (t == other->type()) {
1541       switch (t) {
1542       case T_INT:
1543       case T_FLOAT:
1544         if (c->as_jint_bits() != other->as_jint_bits()) continue;
1545         break;
1546       case T_LONG:
1547       case T_DOUBLE:
1548         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1549         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1550         break;
1551       case T_OBJECT:
1552         if (c->as_jobject() != other->as_jobject()) continue;
1553         break;
1554       default:
1555         break;
1556       }
1557       return _reg_for_constants.at(i);
1558     }
1559   }
1560 
1561   LIR_Opr result = new_register(t);
1562   __ move((LIR_Opr)c, result);
1563   _constants.append(c);
1564   _reg_for_constants.append(result);
1565   return result;
1566 }
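
// For example (illustrative): two unpinned uses of the int constant 42 in
// the same block both resolve to the single virtual register created for the
// first use; when the block is finished, the Constant operands are cleared
// again so that later blocks cannot pick up the now-stale register.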
1567 
1568 //------------------------field access--------------------------------------
1569 
1570 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1571   assert(x->number_of_arguments() == 4, "wrong type");
1572   LIRItem obj   (x->argument_at(0), this);  // object
1573   LIRItem offset(x->argument_at(1), this);  // offset of field
1574   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
1575   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
1576   assert(obj.type()->tag() == objectTag, "invalid type");
1577   assert(cmp.type()->tag() == type->tag(), "invalid type");
1578   assert(val.type()->tag() == type->tag(), "invalid type");
1579 
1580   LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1581                                             obj, offset, cmp, val);
1582   set_result(x, result);
1583 }
1584 
1585 // Comment copied from templateTable_i486.cpp
1586 // ----------------------------------------------------------------------------
1587 // Volatile variables demand their effects be made known to all CPUs in
1588 // order.  Store buffers on most chips allow reads & writes to reorder; the
1589 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1590 // memory barrier (i.e., it's not sufficient that the interpreter does not
1591 // reorder volatile references, the hardware also must not reorder them).
1592 //
1593 // According to the new Java Memory Model (JMM):
1594 // (1) All volatiles are serialized with respect to each other.
1595 // ALSO reads & writes act as acquire & release, so:
1596 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1597 // the read float up to before the read.  It's OK for non-volatile memory refs
1598 // that happen before the volatile read to float down below it.
1599 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1600 // that happen BEFORE the write float down to after the write.  It's OK for
1601 // non-volatile memory refs that happen after the volatile write to float up
1602 // before it.
1603 //
1604 // We only put in barriers around volatile refs (they are expensive), not
1605 // _between_ memory refs (that would require us to track the flavor of the
1606 // previous memory refs).  Requirements (2) and (3) require some barriers
1607 // before volatile stores and after volatile loads.  These nearly cover
1608 // requirement (1) but miss the volatile-store-volatile-load case.  This final
1609 // case is placed after volatile-stores although it could just as well go
1610 // before volatile-loads.
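//
// Schematically (an illustrative summary of the rules above, not the exact
// emitted LIR):
//
//   volatile load:   load; acquire-barrier                  -- rule (2)
//   volatile store:  release-barrier; store; full barrier   -- rule (3) plus
//                    the volatile-store-volatile-load case of rule (1)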
1611 
1612 
1613 void LIRGenerator::do_StoreField(StoreField* x) {
1614   bool needs_patching = x->needs_patching();
1615   bool is_volatile = x->field()->is_volatile();
1616   BasicType field_type = x->field_type();
1617 
1618   CodeEmitInfo* info = NULL;
1619   if (needs_patching) {
1620     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1621     info = state_for(x, x->state_before());
1622   } else if (x->needs_null_check()) {
1623     NullCheck* nc = x->explicit_null_check();
1624     if (nc == NULL) {
1625       info = state_for(x);
1626     } else {
1627       info = state_for(nc);
1628     }
1629   }
1630 
1631   LIRItem object(x->obj(), this);
1632   LIRItem value(x->value(),  this);
1633 
1634   object.load_item();
1635 
1636   if (is_volatile || needs_patching) {
1637     // load item if field is volatile (fewer special cases for volatiles)
1638     // load item if field not initialized
1639     // load item if field not constant
1640     // because of code patching we cannot inline constants
1641     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1642       value.load_byte_item();
1643     } else  {
1644       value.load_item();
1645     }
1646   } else {
1647     value.load_for_store(field_type);
1648   }
1649 
1650   set_no_result(x);
1651 
1652 #ifndef PRODUCT
1653   if (PrintNotLoaded && needs_patching) {
1654     tty->print_cr("   ###class not loaded at store_%s bci %d",
1655                   x->is_static() ?  "static" : "field", x->printable_bci());
1656   }
1657 #endif
1658 
1659   if (x->needs_null_check() &&
1660       (needs_patching ||
1661        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1662     // Emit an explicit null check because the offset is too large.
1663     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1664     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1665     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1666   }
1667 
1668   DecoratorSet decorators = IN_HEAP;
1669   if (is_volatile) {
1670     decorators |= MO_SEQ_CST;
1671   }
1672   if (needs_patching) {
1673     decorators |= C1_NEEDS_PATCHING;
1674   }
1675 
1676   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1677                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1678 }
1679 
1680 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1681   assert(x->is_pinned(),"");
1682   bool needs_range_check = x->compute_needs_range_check();
1683   bool use_length = x->length() != NULL;
1684   bool obj_store = is_reference_type(x->elt_type());
1685   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
1686                                          !get_jobject_constant(x->value())->is_null_object() ||
1687                                          x->should_profile());
1688 
1689   LIRItem array(x->array(), this);
1690   LIRItem index(x->index(), this);
1691   LIRItem value(x->value(), this);
1692   LIRItem length(this);
1693 
1694   array.load_item();
1695   index.load_nonconstant();
1696 
1697   if (use_length && needs_range_check) {
1698     length.set_instruction(x->length());
1699     length.load_item();
1700 
1701   }
1702   if (needs_store_check || x->check_boolean()) {
1703     value.load_item();
1704   } else {
1705     value.load_for_store(x->elt_type());
1706   }
1707 
1708   set_no_result(x);
1709 
1710   // the CodeEmitInfo must be duplicated for each different
1711   // LIR-instruction because spilling can occur anywhere between two
1712   // instructions and so the debug information must be different
1713   CodeEmitInfo* range_check_info = state_for(x);
1714   CodeEmitInfo* null_check_info = NULL;
1715   if (x->needs_null_check()) {
1716     null_check_info = new CodeEmitInfo(range_check_info);
1717   }
1718 
1719   if (GenerateRangeChecks && needs_range_check) {
1720     if (use_length) {
1721       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1722       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1723     } else {
1724       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1725       // range_check also does the null check
1726       null_check_info = NULL;
1727     }
1728   }
1729 
1730   if (GenerateArrayStoreCheck && needs_store_check) {
1731     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1732     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1733   }
1734 
1735   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1736   if (x->check_boolean()) {
1737     decorators |= C1_MASK_BOOLEAN;
1738   }
1739 
1740   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1741                   NULL, null_check_info);
1742 }
1743 
1744 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1745                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1746                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1747   decorators |= ACCESS_READ;
1748   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1749   if (access.is_raw()) {
1750     _barrier_set->BarrierSetC1::load_at(access, result);
1751   } else {
1752     _barrier_set->load_at(access, result);
1753   }
1754 }
1755 
1756 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1757                                LIR_Opr addr, LIR_Opr result) {
1758   decorators |= ACCESS_READ;
1759   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1760   access.set_resolved_addr(addr);
1761   if (access.is_raw()) {
1762     _barrier_set->BarrierSetC1::load(access, result);
1763   } else {
1764     _barrier_set->load(access, result);
1765   }
1766 }
1767 
1768 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
1769                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
1770                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
1771   decorators |= ACCESS_WRITE;
1772   LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
1773   if (access.is_raw()) {
1774     _barrier_set->BarrierSetC1::store_at(access, value);
1775   } else {
1776     _barrier_set->store_at(access, value);
1777   }
1778 }
1779 
1780 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
1781                                                LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
1782   decorators |= ACCESS_READ;
1783   decorators |= ACCESS_WRITE;
1784   // Atomic operations are SEQ_CST by default
1785   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1786   LIRAccess access(this, decorators, base, offset, type);
1787   if (access.is_raw()) {
1788     return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
1789   } else {
1790     return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
1791   }
1792 }
1793 
1794 LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
1795                                             LIRItem& base, LIRItem& offset, LIRItem& value) {
1796   decorators |= ACCESS_READ;
1797   decorators |= ACCESS_WRITE;
1798   // Atomic operations are SEQ_CST by default
1799   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1800   LIRAccess access(this, decorators, base, offset, type);
1801   if (access.is_raw()) {
1802     return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
1803   } else {
1804     return _barrier_set->atomic_xchg_at(access, value);
1805   }
1806 }
1807 
1808 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
1809                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
1810   decorators |= ACCESS_READ;
1811   decorators |= ACCESS_WRITE;
1812   // Atomic operations are SEQ_CST by default
1813   decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1814   LIRAccess access(this, decorators, base, offset, type);
1815   if (access.is_raw()) {
1816     return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
1817   } else {
1818     return _barrier_set->atomic_add_at(access, value);
1819   }
1820 }
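
// For example (illustrative): a caller passing only IN_HEAP | C1_UNSAFE_ACCESS
// to the three atomic helpers above gets MO_SEQ_CST added by default, while a
// caller that already supplied an MO_ decorator keeps its explicit ordering.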
1821 
1822 void LIRGenerator::do_LoadField(LoadField* x) {
1823   bool needs_patching = x->needs_patching();
1824   bool is_volatile = x->field()->is_volatile();
1825   BasicType field_type = x->field_type();
1826 
1827   CodeEmitInfo* info = NULL;
1828   if (needs_patching) {
1829     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1830     info = state_for(x, x->state_before());
1831   } else if (x->needs_null_check()) {
1832     NullCheck* nc = x->explicit_null_check();
1833     if (nc == NULL) {
1834       info = state_for(x);
1835     } else {
1836       info = state_for(nc);
1837     }
1838   }
1839 
1840   LIRItem object(x->obj(), this);
1841 
1842   object.load_item();
1843 
1844 #ifndef PRODUCT
1845   if (PrintNotLoaded && needs_patching) {
1846     tty->print_cr("   ###class not loaded at load_%s bci %d",
1847                   x->is_static() ?  "static" : "field", x->printable_bci());
1848   }
1849 #endif
1850 
1851   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1852   if (x->needs_null_check() &&
1853       (needs_patching ||
1854        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1855        stress_deopt)) {
1856     LIR_Opr obj = object.result();
1857     if (stress_deopt) {
1858       obj = new_register(T_OBJECT);
1859       __ move(LIR_OprFact::oopConst(NULL), obj);
1860     }
1861     // Emit an explicit null check because the offset is too large.
1862     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1863     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1864     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1865   }
1866 
1867   DecoratorSet decorators = IN_HEAP;
1868   if (is_volatile) {
1869     decorators |= MO_SEQ_CST;
1870   }
1871   if (needs_patching) {
1872     decorators |= C1_NEEDS_PATCHING;
1873   }
1874 
1875   LIR_Opr result = rlock_result(x, field_type);
1876   access_load_at(decorators, field_type,
1877                  object, LIR_OprFact::intConst(x->offset()), result,
1878                  info ? new CodeEmitInfo(info) : NULL, info);
1879 }
1880 
1881 
1882 //------------------------java.nio.Buffer.checkIndex------------------------
1883 
1884 // int java.nio.Buffer.checkIndex(int)
1885 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1886   // NOTE: by the time we are in checkIndex() we are guaranteed that
1887   // the buffer is non-null (because checkIndex is package-private and
1888   // only called from within other methods in the buffer).
1889   assert(x->number_of_arguments() == 2, "wrong type");
1890   LIRItem buf  (x->argument_at(0), this);
1891   LIRItem index(x->argument_at(1), this);
1892   buf.load_item();
1893   index.load_item();
1894 
1895   LIR_Opr result = rlock_result(x);
1896   if (GenerateRangeChecks) {
1897     CodeEmitInfo* info = state_for(x);
1898     CodeStub* stub = new RangeCheckStub(info, index.result());
1899     if (index.result()->is_constant()) {
1900       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1901       __ branch(lir_cond_belowEqual, stub);
1902     } else {
1903       cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1904                   java_nio_Buffer::limit_offset(), T_INT, info);
1905       __ branch(lir_cond_aboveEqual, stub);
1906     }
1907     __ move(index.result(), result);
1908   } else {
1909     // Just load the index into the result register
1910     __ move(index.result(), result);
1911   }
1912 }
1913 
1914 
1915 //------------------------array access--------------------------------------
1916 
1917 
1918 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1919   LIRItem array(x->array(), this);
1920   array.load_item();
1921   LIR_Opr reg = rlock_result(x);
1922 
1923   CodeEmitInfo* info = NULL;
1924   if (x->needs_null_check()) {
1925     NullCheck* nc = x->explicit_null_check();
1926     if (nc == NULL) {
1927       info = state_for(x);
1928     } else {
1929       info = state_for(nc);
1930     }
1931     if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1932       LIR_Opr obj = new_register(T_OBJECT);
1933       __ move(LIR_OprFact::oopConst(NULL), obj);
1934       __ null_check(obj, new CodeEmitInfo(info));
1935     }
1936   }
1937   __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1938 }
1939 
1940 
1941 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1942   bool use_length = x->length() != NULL;
1943   LIRItem array(x->array(), this);
1944   LIRItem index(x->index(), this);
1945   LIRItem length(this);
1946   bool needs_range_check = x->compute_needs_range_check();
1947 
1948   if (use_length && needs_range_check) {
1949     length.set_instruction(x->length());
1950     length.load_item();
1951   }
1952 
1953   array.load_item();
1954   if (index.is_constant() && can_inline_as_constant(x->index())) {
1955     // let it be a constant
1956     index.dont_load_item();
1957   } else {
1958     index.load_item();
1959   }
1960 
1961   CodeEmitInfo* range_check_info = state_for(x);
1962   CodeEmitInfo* null_check_info = NULL;
1963   if (x->needs_null_check()) {
1964     NullCheck* nc = x->explicit_null_check();
1965     if (nc != NULL) {
1966       null_check_info = state_for(nc);
1967     } else {
1968       null_check_info = range_check_info;
1969     }
1970     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1971       LIR_Opr obj = new_register(T_OBJECT);
1972       __ move(LIR_OprFact::oopConst(NULL), obj);
1973       __ null_check(obj, new CodeEmitInfo(null_check_info));
1974     }
1975   }
1976 
1977   if (GenerateRangeChecks && needs_range_check) {
1978     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1979       __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
1980     } else if (use_length) {
1981       // TODO: use a (modified) version of array_range_check that does not require a
1982       //       constant length to be loaded to a register
1983       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1984       __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
1985     } else {
1986       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1987       // The range check performs the null check, so clear it out for the load
1988       null_check_info = NULL;
1989     }
1990   }
1991 
1992   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1993 
1994   LIR_Opr result = rlock_result(x, x->elt_type());
1995   access_load_at(decorators, x->elt_type(),
1996                  array, index.result(), result,
1997                  NULL, null_check_info);
1998 }
1999 
2000 
2001 void LIRGenerator::do_NullCheck(NullCheck* x) {
2002   if (x->can_trap()) {
2003     LIRItem value(x->obj(), this);
2004     value.load_item();
2005     CodeEmitInfo* info = state_for(x);
2006     __ null_check(value.result(), info);
2007   }
2008 }
2009 
2010 
2011 void LIRGenerator::do_TypeCast(TypeCast* x) {
2012   LIRItem value(x->obj(), this);
2013   value.load_item();
2014   // the result is the same as from the node we are casting
2015   set_result(x, value.result());
2016 }
2017 
2018 
2019 void LIRGenerator::do_Throw(Throw* x) {
2020   LIRItem exception(x->exception(), this);
2021   exception.load_item();
2022   set_no_result(x);
2023   LIR_Opr exception_opr = exception.result();
2024   CodeEmitInfo* info = state_for(x, x->state());
2025 
2026 #ifndef PRODUCT
2027   if (PrintC1Statistics) {
2028     increment_counter(Runtime1::throw_count_address(), T_INT);
2029   }
2030 #endif
2031 
2032   // check if the instruction has an xhandler in any of the nested scopes
2033   bool unwind = false;
2034   if (info->exception_handlers()->length() == 0) {
2035     // this throw is not inside an xhandler
2036     unwind = true;
2037   } else {
2038     // get some idea of the throw type
2039     bool type_is_exact = true;
2040     ciType* throw_type = x->exception()->exact_type();
2041     if (throw_type == NULL) {
2042       type_is_exact = false;
2043       throw_type = x->exception()->declared_type();
2044     }
2045     if (throw_type != NULL && throw_type->is_instance_klass()) {
2046       ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2047       unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2048     }
2049   }
2050 
2051   // do null check before moving exception oop into fixed register
2052   // to avoid a fixed interval with an oop during the null check.
2053   // Use a copy of the CodeEmitInfo because debug information is
2054   // different for null_check and throw.
2055   if (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL) {
2056     // if the exception object wasn't created using new then it might be null.
2057     __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2058   }
2059 
2060   if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2061     // we need to go through the exception lookup path to get JVMTI
2062     // notification done
2063     unwind = false;
2064   }
2065 
2066   // move exception oop into fixed register
2067   __ move(exception_opr, exceptionOopOpr());
2068 
2069   if (unwind) {
2070     __ unwind_exception(exceptionOopOpr());
2071   } else {
2072     __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2073   }
2074 }
2075 
2076 
2077 void LIRGenerator::do_RoundFP(RoundFP* x) {
2078   assert(strict_fp_requires_explicit_rounding, "not required");
2079 
2080   LIRItem input(x->input(), this);
2081   input.load_item();
2082   LIR_Opr input_opr = input.result();
2083   assert(input_opr->is_register(), "why round if value is not in a register?");
2084   assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2085   if (input_opr->is_single_fpu()) {
2086     set_result(x, round_item(input_opr)); // This code path not currently taken
2087   } else {
2088     LIR_Opr result = new_register(T_DOUBLE);
2089     set_vreg_flag(result, must_start_in_memory);
2090     __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2091     set_result(x, result);
2092   }
2093 }
2094 
2095 // Here UnsafeGetRaw may have x->base() and x->index() be int or long
2096 // on both 64-bit and 32-bit platforms. We expect x->base() to always be long on 64-bit.
2097 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2098   LIRItem base(x->base(), this);
2099   LIRItem idx(this);
2100 
2101   base.load_item();
2102   if (x->has_index()) {
2103     idx.set_instruction(x->index());
2104     idx.load_nonconstant();
2105   }
2106 
2107   LIR_Opr reg = rlock_result(x, x->basic_type());
2108 
2109   int   log2_scale = 0;
2110   if (x->has_index()) {
2111     log2_scale = x->log2_scale();
2112   }
2113 
2114   assert(!x->has_index() || idx.value() == x->index(), "should match");
2115 
2116   LIR_Opr base_op = base.result();
2117   LIR_Opr index_op = idx.result();
2118 #ifndef _LP64
2119   if (base_op->type() == T_LONG) {
2120     base_op = new_register(T_INT);
2121     __ convert(Bytecodes::_l2i, base.result(), base_op);
2122   }
2123   if (x->has_index()) {
2124     if (index_op->type() == T_LONG) {
2125       LIR_Opr long_index_op = index_op;
2126       if (index_op->is_constant()) {
2127         long_index_op = new_register(T_LONG);
2128         __ move(index_op, long_index_op);
2129       }
2130       index_op = new_register(T_INT);
2131       __ convert(Bytecodes::_l2i, long_index_op, index_op);
2132     } else {
2133       assert(x->index()->type()->tag() == intTag, "must be");
2134     }
2135   }
2136   // At this point base and index should both be ints.
2137   assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2138   assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
2139 #else
2140   if (x->has_index()) {
2141     if (index_op->type() == T_INT) {
2142       if (!index_op->is_constant()) {
2143         index_op = new_register(T_LONG);
2144         __ convert(Bytecodes::_i2l, idx.result(), index_op);
2145       }
2146     } else {
2147       assert(index_op->type() == T_LONG, "must be");
2148       if (index_op->is_constant()) {
2149         index_op = new_register(T_LONG);
2150         __ move(idx.result(), index_op);
2151       }
2152     }
2153   }
2154   // At this point base is a non-constant long.
2155   // Index is a long register or an int constant.
2156   // We allow the constant to stay an int because that would allow us a more compact encoding by
2157   // embedding an immediate offset in the address expression. If we have a long constant, we have to
2158   // move it into a register first.
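  // For example (illustrative): a constant int index of 16 can be folded
  // into the address expression as [base + 16], whereas a long constant
  // would first have to be moved into a register, as done above.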
2159   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2160   assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2161                             (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2162 #endif
2163 
2164   BasicType dst_type = x->basic_type();
2165 
2166   LIR_Address* addr;
2167   if (index_op->is_constant()) {
2168     assert(log2_scale == 0, "must not have a scale");
2169     assert(index_op->type() == T_INT, "only int constants supported");
2170     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2171   } else {
2172 #ifdef X86
2173     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2174 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2175     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2176 #else
2177     if (index_op->is_illegal() || log2_scale == 0) {
2178       addr = new LIR_Address(base_op, index_op, dst_type);
2179     } else {
2180       LIR_Opr tmp = new_pointer_register();
2181       __ shift_left(index_op, log2_scale, tmp);
2182       addr = new LIR_Address(base_op, tmp, dst_type);
2183     }
2184 #endif
2185   }
2186 
2187   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2188     __ unaligned_move(addr, reg);
2189   } else {
2190     if (dst_type == T_OBJECT && x->is_wide()) {
2191       __ move_wide(addr, reg);
2192     } else {
2193       __ move(addr, reg);
2194     }
2195   }
2196 }
2197 
2198 
2199 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2200   int  log2_scale = 0;
2201   BasicType type = x->basic_type();
2202 
2203   if (x->has_index()) {
2204     log2_scale = x->log2_scale();
2205   }
2206 
2207   LIRItem base(x->base(), this);
2208   LIRItem value(x->value(), this);
2209   LIRItem idx(this);
2210 
2211   base.load_item();
2212   if (x->has_index()) {
2213     idx.set_instruction(x->index());
2214     idx.load_item();
2215   }
2216 
2217   if (type == T_BYTE || type == T_BOOLEAN) {
2218     value.load_byte_item();
2219   } else {
2220     value.load_item();
2221   }
2222 
2223   set_no_result(x);
2224 
2225   LIR_Opr base_op = base.result();
2226   LIR_Opr index_op = idx.result();
2227 
2228 #ifdef GENERATE_ADDRESS_IS_PREFERRED
2229   LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
2230 #else
2231 #ifndef _LP64
2232   if (base_op->type() == T_LONG) {
2233     base_op = new_register(T_INT);
2234     __ convert(Bytecodes::_l2i, base.result(), base_op);
2235   }
2236   if (x->has_index()) {
2237     if (index_op->type() == T_LONG) {
2238       index_op = new_register(T_INT);
2239       __ convert(Bytecodes::_l2i, idx.result(), index_op);
2240     }
2241   }
2242   // At this point base and index should both be non-constant ints.
2243   assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2244   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
2245 #else
2246   if (x->has_index()) {
2247     if (index_op->type() == T_INT) {
2248       index_op = new_register(T_LONG);
2249       __ convert(Bytecodes::_i2l, idx.result(), index_op);
2250     }
2251   }
2252   // At this point base and index are long and non-constant
2253   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2254   assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2255 #endif
2256 
2257   if (log2_scale != 0) {
2258     // temporary fix (platform dependent code without shift on Intel would be better)
2259     // TODO: ARM also allows embedded shift in the address
2260     LIR_Opr tmp = new_pointer_register();
2261     if (TwoOperandLIRForm) {
2262       __ move(index_op, tmp);
2263       index_op = tmp;
2264     }
2265     __ shift_left(index_op, log2_scale, tmp);
2266     if (!TwoOperandLIRForm) {
2267       index_op = tmp;
2268     }
2269   }
2270 
2271   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2272 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2273   __ move(value.result(), addr);
2274 }
2275 
2276 
2277 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2278   BasicType type = x->basic_type();
2279   LIRItem src(x->object(), this);
2280   LIRItem off(x->offset(), this);
2281 
2282   off.load_item();
2283   src.load_item();
2284 
2285   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2286 
2287   if (x->is_volatile()) {
2288     decorators |= MO_SEQ_CST;
2289   }
2290   if (type == T_BOOLEAN) {
2291     decorators |= C1_MASK_BOOLEAN;
2292   }
2293   if (is_reference_type(type)) {
2294     decorators |= ON_UNKNOWN_OOP_REF;
2295   }
2296 
2297   LIR_Opr result = rlock_result(x, type);
2298   access_load_at(decorators, type,
2299                  src, off.result(), result);
2300 }
2301 
2302 
2303 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2304   BasicType type = x->basic_type();
2305   LIRItem src(x->object(), this);
2306   LIRItem off(x->offset(), this);
2307   LIRItem data(x->value(), this);
2308 
2309   src.load_item();
2310   if (type == T_BOOLEAN || type == T_BYTE) {
2311     data.load_byte_item();
2312   } else {
2313     data.load_item();
2314   }
2315   off.load_item();
2316 
2317   set_no_result(x);
2318 
2319   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2320   if (is_reference_type(type)) {
2321     decorators |= ON_UNKNOWN_OOP_REF;
2322   }
2323   if (x->is_volatile()) {
2324     decorators |= MO_SEQ_CST;
2325   }
2326   access_store_at(decorators, type, src, off.result(), data.result());
2327 }
2328 
2329 void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
2330   BasicType type = x->basic_type();
2331   LIRItem src(x->object(), this);
2332   LIRItem off(x->offset(), this);
2333   LIRItem value(x->value(), this);
2334 
2335   DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST;
2336 
2337   if (is_reference_type(type)) {
2338     decorators |= ON_UNKNOWN_OOP_REF;
2339   }
2340 
2341   LIR_Opr result;
2342   if (x->is_add()) {
2343     result = access_atomic_add_at(decorators, type, src, off, value);
2344   } else {
2345     result = access_atomic_xchg_at(decorators, type, src, off, value);
2346   }
2347   set_result(x, result);
2348 }
2349 
2350 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2351   int lng = x->length();
2352 
2353   for (int i = 0; i < lng; i++) {
2354     C1SwitchRange* one_range = x->at(i);
2355     int low_key = one_range->low_key();
2356     int high_key = one_range->high_key();
2357     BlockBegin* dest = one_range->sux();
2358     if (low_key == high_key) {
2359       __ cmp(lir_cond_equal, value, low_key);
2360       __ branch(lir_cond_equal, dest);
2361     } else if (high_key - low_key == 1) {
2362       __ cmp(lir_cond_equal, value, low_key);
2363       __ branch(lir_cond_equal, dest);
2364       __ cmp(lir_cond_equal, value, high_key);
2365       __ branch(lir_cond_equal, dest);
2366     } else {
2367       LabelObj* L = new LabelObj();
2368       __ cmp(lir_cond_less, value, low_key);
2369       __ branch(lir_cond_less, L->label());
2370       __ cmp(lir_cond_lessEqual, value, high_key);
2371       __ branch(lir_cond_lessEqual, dest);
2372       __ branch_destination(L->label());
2373     }
2374   }
2375   __ jump(default_sux);
2376 }
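
// For a multi-key range [low_key, high_key] -> dest, the loop above emits,
// schematically (illustrative, not the exact LIR):
//
//   cmp value, low_key
//   branch_if_less      L      // value below the range: try the next one
//   cmp value, high_key
//   branch_if_lessEqual dest   // low_key <= value <= high_key
//   L:                         // fall through to the next range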
2377 
2378 
2379 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2380   SwitchRangeList* res = new SwitchRangeList();
2381   int len = x->length();
2382   if (len > 0) {
2383     BlockBegin* sux = x->sux_at(0);
2384     int key = x->lo_key();
2385     BlockBegin* default_sux = x->default_sux();
2386     C1SwitchRange* range = new C1SwitchRange(key, sux);
2387     for (int i = 0; i < len; i++, key++) {
2388       BlockBegin* new_sux = x->sux_at(i);
2389       if (sux == new_sux) {
2390         // still in same range
2391         range->set_high_key(key);
2392       } else {
2393         // skip tests which explicitly dispatch to the default
2394         if (sux != default_sux) {
2395           res->append(range);
2396         }
2397         range = new C1SwitchRange(key, new_sux);
2398       }
2399       sux = new_sux;
2400     }
2401     if (res->length() == 0 || res->last() != range)  res->append(range);
2402   }
2403   return res;
2404 }
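
// Example (illustrative): a tableswitch with lo_key = 0 and successors
// [B1, B1, B2, default, B3] yields the ranges [0,1]->B1, [2,2]->B2 and
// [4,4]->B3; the explicit dispatch to the default successor at key 3 is
// dropped, since do_SwitchRanges() jumps to the default successor anyway.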
2405 
2406 
2407 // we expect the keys to be sorted by increasing value
2408 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2409   SwitchRangeList* res = new SwitchRangeList();
2410   int len = x->length();
2411   if (len > 0) {
2412     BlockBegin* default_sux = x->default_sux();
2413     int key = x->key_at(0);
2414     BlockBegin* sux = x->sux_at(0);
2415     C1SwitchRange* range = new C1SwitchRange(key, sux);
2416     for (int i = 1; i < len; i++) {
2417       int new_key = x->key_at(i);
2418       BlockBegin* new_sux = x->sux_at(i);
2419       if (key+1 == new_key && sux == new_sux) {
2420         // still in same range
2421         range->set_high_key(new_key);
2422       } else {
2423         // skip tests which explicitly dispatch to the default
2424         if (range->sux() != default_sux) {
2425           res->append(range);
2426         }
2427         range = new C1SwitchRange(new_key, new_sux);
2428       }
2429       key = new_key;
2430       sux = new_sux;
2431     }
2432     if (res->length() == 0 || res->last() != range)  res->append(range);
2433   }
2434   return res;
2435 }
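
// Example (illustrative): a lookupswitch with the sorted keys {1, 2, 3, 10}
// all branching to B1 becomes the two ranges [1,3]->B1 and [10,10]->B1;
// only consecutive keys with the same successor are merged.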
2436 
2437 
2438 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2439   LIRItem tag(x->tag(), this);
2440   tag.load_item();
2441   set_no_result(x);
2442 
2443   if (x->is_safepoint()) {
2444     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2445   }
2446 
2447   // move values into phi locations
2448   move_to_phi(x->state());
2449 
2450   int lo_key = x->lo_key();
2451   int len = x->length();
2452   assert(lo_key <= (lo_key + (len - 1)), "integer overflow");
2453   LIR_Opr value = tag.result();
2454 
2455   if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2456     ciMethod* method = x->state()->scope()->method();
2457     ciMethodData* md = method->method_data_or_null();
2458     assert(md != NULL, "Sanity");
2459     ciProfileData* data = md->bci_to_data(x->state()->bci());
2460     assert(data != NULL, "must have profiling data");
2461     assert(data->is_MultiBranchData(), "bad profile data?");
2462     int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2463     LIR_Opr md_reg = new_register(T_METADATA);
2464     __ metadata2reg(md->constant_encoding(), md_reg);
2465     LIR_Opr data_offset_reg = new_pointer_register();
2466     LIR_Opr tmp_reg = new_pointer_register();
2467 
2468     __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2469     for (int i = 0; i < len; i++) {
2470       int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2471       __ cmp(lir_cond_equal, value, i + lo_key);
2472       __ move(data_offset_reg, tmp_reg);
2473       __ cmove(lir_cond_equal,
2474                LIR_OprFact::intptrConst(count_offset),
2475                tmp_reg,
2476                data_offset_reg, T_INT);
2477     }
2478 
2479     LIR_Opr data_reg = new_pointer_register();
2480     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2481     __ move(data_addr, data_reg);
2482     __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2483     __ move(data_reg, data_addr);
2484   }
2485 
2486   if (UseTableRanges) {
2487     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2488   } else {
2489     for (int i = 0; i < len; i++) {
2490       __ cmp(lir_cond_equal, value, i + lo_key);
2491       __ branch(lir_cond_equal, x->sux_at(i));
2492     }
2493     __ jump(x->default_sux());
2494   }
2495 }
2496 
2497 
2498 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2499   LIRItem tag(x->tag(), this);
2500   tag.load_item();
2501   set_no_result(x);
2502 
2503   if (x->is_safepoint()) {
2504     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2505   }
2506 
2507   // move values into phi locations
2508   move_to_phi(x->state());
2509 
2510   LIR_Opr value = tag.result();
2511   int len = x->length();
2512 
2513   if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2514     ciMethod* method = x->state()->scope()->method();
2515     ciMethodData* md = method->method_data_or_null();
2516     assert(md != NULL, "Sanity");
2517     ciProfileData* data = md->bci_to_data(x->state()->bci());
2518     assert(data != NULL, "must have profiling data");
2519     assert(data->is_MultiBranchData(), "bad profile data?");
2520     int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2521     LIR_Opr md_reg = new_register(T_METADATA);
2522     __ metadata2reg(md->constant_encoding(), md_reg);
2523     LIR_Opr data_offset_reg = new_pointer_register();
2524     LIR_Opr tmp_reg = new_pointer_register();
2525 
2526     __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2527     for (int i = 0; i < len; i++) {
2528       int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2529       __ cmp(lir_cond_equal, value, x->key_at(i));
2530       __ move(data_offset_reg, tmp_reg);
2531       __ cmove(lir_cond_equal,
2532                LIR_OprFact::intptrConst(count_offset),
2533                tmp_reg,
2534                data_offset_reg, T_INT);
2535     }
2536 
2537     LIR_Opr data_reg = new_pointer_register();
2538     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2539     __ move(data_addr, data_reg);
2540     __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2541     __ move(data_reg, data_addr);
2542   }
2543 
2544   if (UseTableRanges) {
2545     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2546   } else {
2548     for (int i = 0; i < len; i++) {
2549       __ cmp(lir_cond_equal, value, x->key_at(i));
2550       __ branch(lir_cond_equal, x->sux_at(i));
2551     }
2552     __ jump(x->default_sux());
2553   }
2554 }
2555 
2556 
2557 void LIRGenerator::do_Goto(Goto* x) {
2558   set_no_result(x);
2559 
2560   if (block()->next()->as_OsrEntry()) {
2561     // need to free up storage used for OSR entry point
2562     LIR_Opr osrBuffer = block()->next()->operand();
2563     BasicTypeList signature;
2564     signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2565     CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2566     __ move(osrBuffer, cc->args()->at(0));
2567     __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2568                          getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2569   }
2570 
2571   if (x->is_safepoint()) {
2572     ValueStack* state = x->state_before() ? x->state_before() : x->state();
2573 
2574     // increment backedge counter if needed
2575     CodeEmitInfo* info = state_for(x, state);
2576     increment_backedge_counter(info, x->profiled_bci());
2577     CodeEmitInfo* safepoint_info = state_for(x, state);
2578     __ safepoint(safepoint_poll_register(), safepoint_info);
2579   }
2580 
2581   // Gotos can be folded Ifs; handle this case.
2582   if (x->should_profile()) {
2583     ciMethod* method = x->profiled_method();
2584     assert(method != NULL, "method should be set if branch is profiled");
2585     ciMethodData* md = method->method_data_or_null();
2586     assert(md != NULL, "Sanity");
2587     ciProfileData* data = md->bci_to_data(x->profiled_bci());
2588     assert(data != NULL, "must have profiling data");
2589     int offset;
2590     if (x->direction() == Goto::taken) {
2591       assert(data->is_BranchData(), "need BranchData for two-way branches");
2592       offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2593     } else if (x->direction() == Goto::not_taken) {
2594       assert(data->is_BranchData(), "need BranchData for two-way branches");
2595       offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2596     } else {
2597       assert(data->is_JumpData(), "need JumpData for branches");
2598       offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2599     }
2600     LIR_Opr md_reg = new_register(T_METADATA);
2601     __ metadata2reg(md->constant_encoding(), md_reg);
2602 
2603     increment_counter(new LIR_Address(md_reg, offset,
2604                                       NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2605   }
2606 
2607   // emit phi-instruction move after safepoint since this simplifies
2608   // describing the state at the safepoint.
2609   move_to_phi(x->state());
2610 
2611   __ jump(x->default_sux());
2612 }
2613 
2614 /**
2615  * Emit profiling code if needed for arguments, parameters, return value types
2616  *
2617  * @param md                    MDO the code will update at runtime
2618  * @param md_base_offset        common offset in the MDO for this profile and subsequent ones
2619  * @param md_offset             offset in the MDO (on top of md_base_offset) for this profile
2620  * @param profiled_k            current profile
2621  * @param obj                   IR node for the object to be profiled
2622  * @param mdp                   register to hold the pointer inside the MDO (md + md_base_offset).
2623  *                              Set once we find an update to make and use for next ones.
2624  * @param not_null              true if we know obj cannot be null
2625  * @param signature_at_call_k   signature at call for obj
2626  * @param callee_signature_k    signature of callee for obj
2627  *                              at call and callee signatures differ at method handle call
2628  *                              (the at-call and callee signatures differ at method handle calls)
2629  */
2630 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2631                                     Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2632                                     ciKlass* callee_signature_k) {
2633   ciKlass* result = NULL;
2634   bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2635   bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2636   // Known not to be null (or the null bit is already set), and the type
2637   // is already set to unknown: nothing we can do to improve profiling.
2638   if (!do_null && !do_update) {
2639     return result;
2640   }
2641 
2642   ciKlass* exact_klass = NULL;
2643   Compilation* comp = Compilation::current();
2644   if (do_update) {
2645     // try to find exact type, using CHA if possible, so that loading
2646     // the klass from the object can be avoided
2647     ciType* type = obj->exact_type();
2648     if (type == NULL) {
2649       type = obj->declared_type();
2650       type = comp->cha_exact_type(type);
2651     }
2652     assert(type == NULL || type->is_klass(), "type should be class");
2653     exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2654 
2655     do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2656   }
2657 
2658   if (!do_null && !do_update) {
2659     return result;
2660   }
2661 
2662   ciKlass* exact_signature_k = NULL;
2663   if (do_update) {
2664     // Is the type from the signature exact (the only one possible)?
2665     exact_signature_k = signature_at_call_k->exact_klass();
2666     if (exact_signature_k == NULL) {
2667       exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2668     } else {
2669       result = exact_signature_k;
2670       // Known statically. No need to emit any code: prevent
2671       // LIR_Assembler::emit_profile_type() from emitting useless code
2672       profiled_k = ciTypeEntries::with_status(result, profiled_k);
2673     }
2674     // exact_klass and exact_signature_k can both be non-NULL but
2675     // different if exact_klass is loaded after the ciObject for
2676     // exact_signature_k is created.
2677     if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2678       // sometimes the type of the signature is better than the best type
2679       // the compiler has
2680       exact_klass = exact_signature_k;
2681     }
2682     if (callee_signature_k != NULL &&
2683         callee_signature_k != signature_at_call_k) {
2684       ciKlass* improved_klass = callee_signature_k->exact_klass();
2685       if (improved_klass == NULL) {
2686         improved_klass = comp->cha_exact_type(callee_signature_k);
2687       }
2688       if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2689         exact_klass = exact_signature_k;
2690       }
2691     }
2692     do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2693   }
2694 
2695   if (!do_null && !do_update) {
2696     return result;
2697   }
2698 
2699   if (mdp == LIR_OprFact::illegalOpr) {
2700     mdp = new_register(T_METADATA);
2701     __ metadata2reg(md->constant_encoding(), mdp);
2702     if (md_base_offset != 0) {
2703       LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2704       mdp = new_pointer_register();
2705       __ leal(LIR_OprFact::address(base_type_address), mdp);
2706     }
2707   }
2708   LIRItem value(obj, this);
2709   value.load_item();
2710   __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2711                   value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2712   return result;
2713 }
2714 
2715 // profile parameters on entry to the root of the compilation
2716 void LIRGenerator::profile_parameters(Base* x) {
2717   if (compilation()->profile_parameters()) {
2718     CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2719     ciMethodData* md = scope()->method()->method_data_or_null();
2720     assert(md != NULL, "Sanity");
2721 
2722     if (md->parameters_type_data() != NULL) {
2723       ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2724       ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
2725       LIR_Opr mdp = LIR_OprFact::illegalOpr;
2726       for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2727         LIR_Opr src = args->at(i);
2728         assert(!src->is_illegal(), "check");
2729         BasicType t = src->type();
2730         if (is_reference_type(t)) {
2731           intptr_t profiled_k = parameters->type(j);
2732           Local* local = x->state()->local_at(java_index)->as_Local();
2733           ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2734                                         in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2735                                         profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2736           // If the profile is known statically, set it once and for all and do not emit any code
2737           if (exact != NULL) {
2738             md->set_parameter_type(j, exact);
2739           }
2740           j++;
2741         }
2742         java_index += type2size[t];
2743       }
2744     }
2745   }
2746 }
2747 
2748 void LIRGenerator::do_Base(Base* x) {
2749   __ std_entry(LIR_OprFact::illegalOpr);
2750   // Emit moves from physical registers / stack slots to virtual registers
2751   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2752   IRScope* irScope = compilation()->hir()->top_scope();
2753   int java_index = 0;
2754   for (int i = 0; i < args->length(); i++) {
2755     LIR_Opr src = args->at(i);
2756     assert(!src->is_illegal(), "check");
2757     BasicType t = src->type();
2758 
2759     // Types which are smaller than int are passed as int, so
2760     // correct the type that is passed.
2761     switch (t) {
2762     case T_BYTE:
2763     case T_BOOLEAN:
2764     case T_SHORT:
2765     case T_CHAR:
2766       t = T_INT;
2767       break;
2768     default:
2769       break;
2770     }
2771 
2772     LIR_Opr dest = new_register(t);
2773     __ move(src, dest);
2774 
2775     // Assign new location to Local instruction for this local
2776     Local* local = x->state()->local_at(java_index)->as_Local();
2777     assert(local != NULL, "Locals for incoming arguments must have been created");
2778 #ifndef __SOFTFP__
2779     // The java calling convention passes double as long and float as int.
2780     assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2781 #endif // __SOFTFP__
2782     local->set_operand(dest);
2783     _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2784     java_index += type2size[t];
2785   }
2786 
2787   if (compilation()->env()->dtrace_method_probes()) {
2788     BasicTypeList signature;
2789     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
2790     signature.append(T_METADATA); // Method*
2791     LIR_OprList* args = new LIR_OprList();
2792     args->append(getThreadPointer());
2793     LIR_Opr meth = new_register(T_METADATA);
2794     __ metadata2reg(method()->constant_encoding(), meth);
2795     args->append(meth);
2796     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2797   }
2798 
  if (method()->is_synchronized()) {
    LIR_Opr obj;
    if (method()->is_static()) {
      obj = new_register(T_OBJECT);
      __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
    } else {
      Local* receiver = x->state()->local_at(0)->as_Local();
      assert(receiver != NULL, "must already exist");
      obj = receiver->operand();
    }
    assert(obj->is_valid(), "must be valid");

    if (GenerateSynchronizationCode) {  // is_synchronized() was checked above
      LIR_Opr lock = syncLockOpr();
      __ load_stack_address_monitor(0, lock);

      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);

      // The receiver is guaranteed non-NULL, so no CodeEmitInfo is needed.
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
    }
  }
  if (compilation()->age_code()) {
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
    decrement_age(info);
  }
  // increment invocation counters if needed
  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
    profile_parameters(x);
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
    increment_invocation_counter(info);
  }

  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
  __ jump(x->default_sux());
}


void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  // Construct our frame and model the production of the incoming
  // pointer to the OSR buffer.
  __ osr_entry(LIR_Assembler::osrBufferPointer());
  LIR_Opr result = rlock_result(x);
  __ move(LIR_Assembler::osrBufferPointer(), result);
}


void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  assert(args->length() == arg_list->length(),
         "args=%d, arg_list=%d", args->length(), arg_list->length());
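  // Non-receiver arguments are moved into their calling-convention
  // locations first; the receiver (index 0) is forced into its fixed
  // register last, below, so these moves cannot clobber it.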
  for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    if (loc->is_register()) {
      param->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      param->load_for_store(addr->type());
      if (addr->type() == T_OBJECT) {
        __ move_wide(param->result(), addr);
      } else if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(param->result(), addr);
      } else {
        __ move(param->result(), addr);
      }
    }
  }

  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move_wide(receiver->result(), loc->as_address_ptr());
    }
  }
}


// Visits all arguments, returning appropriate items without loading them
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  LIRItemList* argument_items = new LIRItemList();
  if (x->has_receiver()) {
    LIRItem* receiver = new LIRItem(x->receiver(), this);
    argument_items->append(receiver);
  }
  for (int i = 0; i < x->number_of_arguments(); i++) {
    LIRItem* param = new LIRItem(x->argument_at(i), this);
    argument_items->append(param);
  }
  return argument_items;
}


// An invoke with a receiver has the following phases:
//   a) traverse and load/lock the receiver;
//   b) traverse all arguments -> item-array (invoke_visit_arguments)
//   c) push the receiver on the stack
//   d) load each of the items and push it on the stack
//   e) unlock the receiver
//   f) move the receiver into the receiver register %o0
//   g) lock the result registers and emit the call operation
//
// Before issuing a call, we must spill-save all values on the stack
// that are in caller-save registers. "spill-save" moves those registers
// either into a free callee-save register or spills them if no free
// callee-save register is available.
//
// The problem is where to invoke spill-save.
// - if invoked between e) and f), we may lock a callee-save
//   register in "spill-save" that destroys the receiver register
//   before f) is executed
// - if we rearrange f) to be earlier (by loading %o0) it
//   may destroy a value on the stack that is currently in %o0
//   and is waiting to be spilled
// - if we keep the receiver locked while doing spill-save,
//   we cannot spill it as it is spill-locked
//
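// As a rough sketch (register names are illustrative only), the LIR
// emitted below for a virtual call with one argument looks like:
//
//   move  arg0 -> [cc location 1]             // invoke_load_arguments
//   move  recv -> receiverOpr()               // fixed receiver register
//   icvirtual_call target, info               // inline-cache dispatch
//   move  result_register_for(type) -> vreg   // rlock_result
//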
void LIRGenerator::do_Invoke(Invoke* x) {
  CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);

  LIR_OprList* arg_list = cc->args();
  LIRItemList* args = invoke_visit_arguments(x);
  LIR_Opr receiver = LIR_OprFact::illegalOpr;

  // setup result register
  LIR_Opr result_register = LIR_OprFact::illegalOpr;
  if (x->type() != voidType) {
    result_register = result_register_for(x->type());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  invoke_load_arguments(x, args, arg_list);

  if (x->has_receiver()) {
    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
    receiver = args->at(0)->result();
  }

  // emit invoke code
  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");

  // JSR 292
  // Preserve the SP over MethodHandle call sites, if needed.
  ciMethod* target = x->target();
  bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
                                  target->is_method_handle_intrinsic() ||
                                  target->is_compiled_lambda_form());
  if (is_method_handle_invoke) {
    info->set_is_method_handle_invoke(true);
    if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
      __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
    }
  }

  switch (x->code()) {
    case Bytecodes::_invokestatic:
      __ call_static(target, result_register,
                     SharedRuntime::get_resolve_static_call_stub(),
                     arg_list, info);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface:
      // for loaded and final (method or class) targets we still produce an inline cache,
      // in order to be able to call mixed mode
      if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {
        __ call_opt_virtual(target, receiver, result_register,
                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
                            arg_list, info);
      } else {
        __ call_icvirtual(target, receiver, result_register,
                          SharedRuntime::get_resolve_virtual_call_stub(),
                          arg_list, info);
      }
      break;
    case Bytecodes::_invokedynamic: {
      __ call_dynamic(target, receiver, result_register,
                      SharedRuntime::get_resolve_static_call_stub(),
                      arg_list, info);
      break;
    }
    default:
      fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
      break;
  }

  // JSR 292
  // Restore the SP after MethodHandle call sites, if needed.
  if (is_method_handle_invoke
      && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
    __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
  }

  if (result_register->is_valid()) {
    LIR_Opr result = rlock_result(x);
    __ move(result_register, result);
  }
}


void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem value(x->argument_at(0), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
  __ move(tmp, reg);
}


// Code for:  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
void LIRGenerator::do_IfOp(IfOp* x) {
#ifdef ASSERT
  {
    ValueTag xtag = x->x()->type()->tag();
    ValueTag ttag = x->tval()->type()->tag();
    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
  }
#endif

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  if (can_inline_as_constant(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }

  LIRItem t_val(x->tval(), this);
  LIRItem f_val(x->fval(), this);
  t_val.dont_load_item();
  f_val.dont_load_item();
  LIR_Opr reg = rlock_result(x);

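  // Materialize the select as a compare followed by a conditional move;
  // t_val and f_val stay unloaded since cmove can consume constants.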
  __ cmp(lir_cond(x->cond()), left.result(), right.result());
  __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
}

#ifdef JFR_HAVE_INTRINSICS

void LIRGenerator::do_getEventWriter(Intrinsic* x) {
  LabelObj* L_end = new LabelObj();

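  // The thread-local writer handle may not have been created yet;
  // default the result to NULL and only load through the handle when
  // it is non-zero.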
  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
  // meaning of these two is mixed up (see JDK-8026837).
  LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(),
                                           in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR),
                                           T_ADDRESS);
  LIR_Opr result = rlock_result(x);
  __ move(LIR_OprFact::oopConst(NULL), result);
  LIR_Opr jobj = new_register(T_METADATA);
  __ move_wide(jobj_addr, jobj);
  __ cmp(lir_cond_equal, jobj, LIR_OprFact::metadataConst(0));
  __ branch(lir_cond_equal, L_end->label());

  access_load(IN_NATIVE, T_OBJECT, LIR_OprFact::address(new LIR_Address(jobj, T_OBJECT)), result);

  __ branch_destination(L_end->label());
}

#endif


void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  // Enforce computation of _reserved_argument_area_size which is required on some platforms.
  BasicTypeList signature;
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime_leaf(routine, getThreadTemp(),
                       reg, new LIR_OprList());
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
  case vmIntrinsics::_intBitsToFloat      :
  case vmIntrinsics::_doubleToRawLongBits :
  case vmIntrinsics::_longBitsToDouble    :
  case vmIntrinsics::_floatToRawIntBits   : {
    do_FPIntrinsics(x);
    break;
  }

#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_getEventWriter:
    do_getEventWriter(x);
    break;
  case vmIntrinsics::_counterTime:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), x);
    break;
#endif

  case vmIntrinsics::_currentTimeMillis:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), x);
    break;

  case vmIntrinsics::_nanoTime:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), x);
    break;

  case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
  case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
  case vmIntrinsics::_isPrimitive:    do_isPrimitive(x);   break;
  case vmIntrinsics::_getModifiers:   do_getModifiers(x);  break;
  case vmIntrinsics::_getClass:       do_getClass(x);      break;
  case vmIntrinsics::_currentThread:  do_currentThread(x); break;
  case vmIntrinsics::_getObjectSize:  do_getObjectSize(x); break;

  case vmIntrinsics::_dlog:           // fall through
  case vmIntrinsics::_dlog10:         // fall through
  case vmIntrinsics::_dabs:           // fall through
  case vmIntrinsics::_dsqrt:          // fall through
  case vmIntrinsics::_dtan:           // fall through
  case vmIntrinsics::_dsin:           // fall through
  case vmIntrinsics::_dcos:           // fall through
  case vmIntrinsics::_dexp:           // fall through
  case vmIntrinsics::_dpow:           do_MathIntrinsic(x); break;
  case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;

  case vmIntrinsics::_fmaD:           do_FmaIntrinsic(x); break;
  case vmIntrinsics::_fmaF:           do_FmaIntrinsic(x); break;

  // java.nio.Buffer.checkIndex
  case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;

  case vmIntrinsics::_compareAndSetReference:
    do_CompareAndSwap(x, objectType);
    break;
  case vmIntrinsics::_compareAndSetInt:
    do_CompareAndSwap(x, intType);
    break;
  case vmIntrinsics::_compareAndSetLong:
    do_CompareAndSwap(x, longType);
    break;

  case vmIntrinsics::_loadFence:
    __ membar_acquire();
    break;
  case vmIntrinsics::_storeFence:
    __ membar_release();
    break;
  case vmIntrinsics::_storeStoreFence:
    __ membar_storestore();
    break;
  case vmIntrinsics::_fullFence:
    __ membar();
    break;
  case vmIntrinsics::_onSpinWait:
    __ on_spin_wait();
    break;
  case vmIntrinsics::_Reference_get:
    do_Reference_get(x);
    break;

  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
    do_update_CRC32(x);
    break;

  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    do_update_CRC32C(x);
    break;

  case vmIntrinsics::_vectorizedMismatch:
    do_vectorizedMismatch(x);
    break;

  case vmIntrinsics::_blackhole:
    do_blackhole(x);
    break;

  default: ShouldNotReachHere(); break;
  }
}

void LIRGenerator::profile_arguments(ProfileCall* x) {
  if (compilation()->profile_arguments()) {
    int bci = x->bci_of_invoke();
    ciMethodData* md = x->method()->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(bci);
    if (data != NULL) {
      if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
          (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
        ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
        int base_offset = md->byte_offset_of_slot(data, extra);
        LIR_Opr mdp = LIR_OprFact::illegalOpr;
        ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();

        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
        int start = 0;
        int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
        if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
          // first argument is not profiled at call (method handle invoke)
          assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
          start = 1;
        }
        ciSignature* callee_signature = x->callee()->signature();
        // method handle call to virtual method
        bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
        ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);

        bool ignored_will_link;
        ciSignature* signature_at_call = NULL;
        x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
        ciSignatureStream signature_at_call_stream(signature_at_call);

        // if called through method handle invoke, some arguments may have been popped
        for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
          int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
          ciKlass* exact = profile_type(md, base_offset, off,
              args->type(i), x->profiled_arg_at(i+start), mdp,
              !x->arg_needs_null_check(i+start),
              signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
          if (exact != NULL) {
            md->set_argument_type(bci, i, exact);
          }
        }
      } else {
#ifdef ASSERT
        Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
        int n = x->nb_profiled_args();
        assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
            (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
            "only at JSR292 bytecodes");
#endif
      }
    }
  }
}

// profile parameters on entry to an inlined method
void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
  if (compilation()->profile_parameters() && x->inlined()) {
    ciMethodData* md = x->callee()->method_data_or_null();
    if (md != NULL) {
      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
      if (parameters_type_data != NULL) {
        ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
        LIR_Opr mdp = LIR_OprFact::illegalOpr;
        bool has_receiver = !x->callee()->is_static();
        ciSignature* sig = x->callee()->signature();
        ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
        int i = 0; // to iterate on the Instructions
        Value arg = x->recv();
        bool not_null = false;
        int bci = x->bci_of_invoke();
        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
        // The first parameter is the receiver, so that's what we start
        // with if it exists. One exception is a method handle call to a
        // virtual method: the receiver is in the args list.
        if (arg == NULL || !Bytecodes::has_receiver(bc)) {
          i = 1;
          arg = x->profiled_arg_at(0);
          not_null = !x->arg_needs_null_check(0);
        }
        int k = 0; // to iterate on the profile data
        for (;;) {
          intptr_t profiled_k = parameters->type(k);
          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
                                        in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
                                        profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
          // If the profile is known statically, set it once and for all and do not emit any code
          if (exact != NULL) {
            md->set_parameter_type(k, exact);
          }
          k++;
          if (k >= parameters_type_data->number_of_parameters()) {
#ifdef ASSERT
            int extra = 0;
            if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
                x->nb_profiled_args() >= TypeProfileParmsLimit &&
                x->recv() != NULL && Bytecodes::has_receiver(bc)) {
              extra += 1;
            }
            assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
#endif
            break;
          }
          arg = x->profiled_arg_at(i);
          not_null = !x->arg_needs_null_check(i);
          i++;
        }
      }
    }
  }
}

void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  // Need recv in a temporary register so it interferes with the other temporaries
  LIR_Opr recv = LIR_OprFact::illegalOpr;
  LIR_Opr mdo = new_register(T_METADATA);
  // tmp is used to hold the counters on SPARC
  LIR_Opr tmp = new_pointer_register();

  if (x->nb_profiled_args() > 0) {
    profile_arguments(x);
  }

  // profile parameters on inlined method entry including receiver
  if (x->recv() != NULL || x->nb_profiled_args() > 0) {
    profile_parameters_at_call(x);
  }

  if (x->recv() != NULL) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}

void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  int bci = x->bci_of_invoke();
  ciMethodData* md = x->method()->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  if (data != NULL) {
    assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
    ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
    LIR_Opr mdp = LIR_OprFact::illegalOpr;

    bool ignored_will_link;
    ciSignature* signature_at_call = NULL;
    x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);

    // The offset within the MDO of the entry to update may be too large
    // to be used in load/store instructions on some platforms. So have
    // profile_type() compute the address of the profile in a register.
    ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
        ret->type(), x->ret(), mdp,
        !x->needs_null_check(),
        signature_at_call->return_type()->as_klass(),
        x->callee()->signature()->return_type()->as_klass());
    if (exact != NULL) {
      md->set_return_type(bci, exact);
    }
  }
}

void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  // We can safely ignore accessors here, since c2 will inline them anyway;
  // accessors are also always mature.
  if (!x->inlinee()->is_accessor()) {
    CodeEmitInfo* info = state_for(x, x->state(), true);
    // Notify the runtime only very infrequently, just to take care of counter overflows
    int freq_log = Tier23InlineeNotifyFreqLog;
    double scale;
    if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
      freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
    }
    increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
  }
}

void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
  if (compilation()->count_backedges()) {
#if defined(X86) && !defined(_LP64)
    // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
    LIR_Opr left_copy = new_register(left->type());
    __ move(left, left_copy);
    __ cmp(cond, left_copy, right);
#else
    __ cmp(cond, left, right);
#endif
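    // Select the increment conditionally: only the branch target whose
    // bci lies below the current bci is a backedge, so only that side
    // contributes count_increment; the other side contributes zero.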
    LIR_Opr step = new_register(T_INT);
    LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment);
    LIR_Opr zero = LIR_OprFact::intConst(0);
    __ cmove(cond,
        (left_bci < bci) ? plus_one : zero,
        (right_bci < bci) ? plus_one : zero,
        step, left->type());
    increment_backedge_counter(info, step, bci);
  }
}


void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge) {
  int freq_log = 0;
  int level = compilation()->env()->comp_level();
  if (level == CompLevel_limited_profile) {
    freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
  } else if (level == CompLevel_full_profile) {
    freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
  } else {
    ShouldNotReachHere();
  }
  // Increment the appropriate invocation/backedge counter and notify the runtime.
  double scale;
  if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
    freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
  }
  increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
}

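// Code aging: decrement the nmethod age counter on entry; when it
// reaches zero, deoptimize (Reason_tenured, make_not_entrant) so the
// runtime gets a chance to recompile the method with fresh profile data.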
void LIRGenerator::decrement_age(CodeEmitInfo* info) {
  ciMethod* method = info->scope()->method();
  MethodCounters* mc_adr = method->ensure_method_counters();
  if (mc_adr != NULL) {
    LIR_Opr mc = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(mc_adr), mc);
    int offset = in_bytes(MethodCounters::nmethod_age_offset());
    LIR_Address* counter = new LIR_Address(mc, offset, T_INT);
    LIR_Opr result = new_register(T_INT);
    __ load(counter, result);
    __ sub(result, LIR_OprFact::intConst(1), result);
    __ store(result, counter);
    // DeoptimizeStub will reexecute from the current state in code info.
    CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
                                         Deoptimization::Action_make_not_entrant);
    __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
    __ branch(lir_cond_lessEqual, deopt);
  }
}


void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
                                                ciMethod *method, LIR_Opr step, int frequency,
                                                int bci, bool backedge, bool notify) {
  assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
  int level = _compilation->env()->comp_level();
  assert(level > CompLevel_simple, "Shouldn't be here");

  int offset = -1;
  LIR_Opr counter_holder = NULL;
  if (level == CompLevel_limited_profile) {
    MethodCounters* counters_adr = method->ensure_method_counters();
    if (counters_adr == NULL) {
      bailout("method counters allocation failed");
      return;
    }
    counter_holder = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
    offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
                                 MethodCounters::invocation_counter_offset());
  } else if (level == CompLevel_full_profile) {
    counter_holder = new_register(T_METADATA);
    offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
                                 MethodData::invocation_counter_offset());
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    __ metadata2reg(md->constant_encoding(), counter_holder);
  } else {
    ShouldNotReachHere();
  }
  LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, step, result);
  __ store(result, counter);
  if (notify && (!backedge || UseOnStackReplacement)) {
    LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
    // The bci for the CodeEmitInfo can point to the cmp of an if; we want the if's bci.
    CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
    int freq = frequency << InvocationCounter::count_shift;
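    // The masked check below succeeds whenever the shifted count is a
    // multiple of frequency + 1, so with freq_log == 10, for example,
    // the overflow stub is entered roughly every 2^10 = 1024 increments.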
    if (freq == 0) {
      if (!step->is_constant()) {
        __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
        __ branch(lir_cond_notEqual, overflow);
      } else {
        __ branch(lir_cond_always, overflow);
      }
    } else {
      LIR_Opr mask = load_immediate(freq, T_INT);
      if (!step->is_constant()) {
        // If step is 0, make sure the overflow check below always fails
        __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
        __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT);
      }
      __ logical_and(result, mask, result);
      __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, overflow);
    }
    __ branch_destination(overflow->continuation());
  }
}

void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
  LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
  BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());

  if (x->pass_thread()) {
    signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    args->append(getThreadPointer());
  }

  for (int i = 0; i < x->number_of_arguments(); i++) {
    Value a = x->argument_at(i);
    LIRItem* item = new LIRItem(a, this);
    item->load_item();
    args->append(item->result());
    signature->append(as_BasicType(a->type()));
  }

  LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
  if (x->type() == voidType) {
    set_no_result(x);
  } else {
    __ move(result, rlock_result(x));
  }
}

#ifdef ASSERT
void LIRGenerator::do_Assert(Assert *x) {
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  assert(tag == intTag, "Only integer assertions are valid!");

  xin->load_item();
  yin->dont_load_item();

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
}
#endif

void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
  Instruction *a = x->x();
  Instruction *b = x->y();
  if (!a || StressRangeCheckElimination) {
    assert(!b || StressRangeCheckElimination, "B must also be null");

    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);

    __ jump(stub);
  } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
    int a_int = a->type()->as_IntConstant()->value();
    int b_int = b->type()->as_IntConstant()->value();

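    // Both operands are constants: fold the predicate now. If the
    // condition provably holds, the guard always fails, so emit an
    // unconditional jump to the deoptimization stub.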
    bool ok = false;

    switch (x->cond()) {
      case Instruction::eql: ok = (a_int == b_int); break;
      case Instruction::neq: ok = (a_int != b_int); break;
      case Instruction::lss: ok = (a_int < b_int); break;
      case Instruction::leq: ok = (a_int <= b_int); break;
      case Instruction::gtr: ok = (a_int > b_int); break;
      case Instruction::geq: ok = (a_int >= b_int); break;
      case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
      case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
      default: ShouldNotReachHere();
    }

    if (ok) {
      CodeEmitInfo *info = state_for(x, x->state());
      CodeStub* stub = new PredicateFailedStub(info);

      __ jump(stub);
    }
  } else {
    ValueTag tag = x->x()->type()->tag();
    If::Condition cond = x->cond();
    LIRItem xitem(x->x(), this);
    LIRItem yitem(x->y(), this);
    LIRItem* xin = &xitem;
    LIRItem* yin = &yitem;

    assert(tag == intTag, "Only integer deoptimizations are valid!");

    xin->load_item();
    yin->dont_load_item();
    set_no_result(x);

    LIR_Opr left = xin->result();
    LIR_Opr right = yin->result();

    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);

    __ cmp(lir_cond(cond), left, right);
    __ branch(lir_cond(cond), stub);
  }
}

void LIRGenerator::do_blackhole(Intrinsic *x) {
  assert(!x->has_receiver(), "Should have been checked before: only static methods here");
  for (int c = 0; c < x->number_of_arguments(); c++) {
    // Load the argument
    LIRItem vitem(x->argument_at(c), this);
    vitem.load_item();
    // ...and leave it unused.
  }
}

LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(1);
  LIRItem value(arg1, this);
  args.append(&value);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}


LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(2);
  LIRItem value1(arg1, this);
  LIRItem value2(arg2, this);
  args.append(&value1);
  args.append(&value2);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));
  signature.append(as_BasicType(arg2->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}


LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      // if (!can_store_as_constant(arg)) {
      //   LIR_Opr tmp = new_register(arg->type());
      //   __ move(arg, tmp);
      //   arg = tmp;
      // }
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg, addr);
      } else {
        __ move(arg, addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}


LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);

  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      arg->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg->result(), addr);
      } else {
        __ move(arg->result(), addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}

void LIRGenerator::do_MemBar(MemBar* x) {
  LIR_Code code = x->code();
  switch (code) {
  case lir_membar_acquire   : __ membar_acquire(); break;
  case lir_membar_release   : __ membar_release(); break;
  case lir_membar           : __ membar(); break;
  case lir_membar_loadload  : __ membar_loadload(); break;
  case lir_membar_storestore: __ membar_storestore(); break;
  case lir_membar_loadstore : __ membar_loadstore(); break;
  case lir_membar_storeload : __ membar_storeload(); break;
  default                   : ShouldNotReachHere(); break;
  }
}

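// A jboolean store must only write 0 or 1. boolean[] and byte[] share
// T_BYTE storage, so the array's runtime type is checked via the
// layout-helper diff bit and the value is masked to its lowest bit
// only when the array really is a boolean[].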
LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  LIR_Opr value_fixed = rlock_byte(T_BYTE);
  if (TwoOperandLIRForm) {
    __ move(value, value_fixed);
    __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
  } else {
    __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
  }
  LIR_Opr klass = new_register(T_METADATA);
  load_klass(array, klass, null_check_info);
  null_check_info = NULL;
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
  __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
  __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
  value = value_fixed;
  return value;
}

LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  if (x->check_boolean()) {
    value = mask_boolean(array, value, null_check_info);
  }
  return value;
}