/*
 * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

#ifndef PATCHED_ADDR
#define PATCHED_ADDR (max_jint)
#endif

void PhiResolverState::reset() {
  _virtual_operands.clear();
  _other_operands.clear();
  _vreg_table.clear();
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//   r1 := r2  becomes  temp := r1
//   r2 := r1           r1 := r2
//                      r2 := temp
// and orders moves:
//
//   r2 := r3  becomes  r1 := r2
//   r1 := r2           r2 := r3

PhiResolver::PhiResolver(LIRGenerator* gen)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset();
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse assignment graph in depth first order and generate moves in post order
// ie. two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// ie. cycle a := b, b := a start with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands().at(i);
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands().at(i);
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}

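// A usage sketch (hedged; compare move_to_phi() below): callers record all
// parallel moves first and rely on the destructor to order them, breaking
// any cycle through a fresh temp register:
//
//   PhiResolver resolver(this);
//   resolver.move(operand_for_instruction(cur_val),
//                 operand_for_instruction(phi));   // record only, no code yet
//   // ordered moves are emitted when 'resolver' goes out of scope
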
ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table().at_put(vreg_num, node);
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  assert(!_gen->in_conditional_code(), "LIRItem cannot be loaded in conditional code");

  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}

//--------------------------------------------------------------


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root;
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() &&
          value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class.
   */
  if ((!CompilerConfig::is_c1_only_no_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, stub); // forward branch
  }
}

void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (two_operand_lir_form && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:  __ mul(left_op, right_op, result_op, tmp_op); break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          jint c = right->as_jint();
          if (c > 0 && is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv: __ div(left_op, right_op, result_op, tmp_op); break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {

  if (two_operand_lir_form && value != result_op
      // Only 32bit right shifts require two operand form on S390.
      S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (two_operand_lir_form && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op); break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
                                 CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_imse_stub) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_imse_stub, scratch);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_imse_stub);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, !UseHeavyMonitors, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
    tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, bool allow_inline, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  if (allow_inline) {
    assert(!is_unresolved && klass->is_loaded(), "inline type klass should be resolved");
    __ metadata2reg(klass->constant_encoding(), klass_reg);
  } else {
    klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  }
  // If klass is not loaded we do not know if the klass has finalizers or is an unexpected inline klass
  if (UseFastNewInstance && klass->is_loaded() && (allow_inline || !klass->is_inlinetype())
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() > 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, allow_inline ? Runtime1::new_instance_id : Runtime1::new_instance_no_inline_id);
    __ jump(slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src->is_loaded_flattened_array() && !dst->is_loaded_flattened_array()) {
    flags &= ~LIR_OpArrayCopy::always_slow_path;
  }
  if (!src->maybe_flattened_array()) {
    flags &= ~LIR_OpArrayCopy::src_inlinetype_check;
  }
  if (!dst->maybe_flattened_array() && !dst->maybe_null_free_array()) {
    flags &= ~LIR_OpArrayCopy::dst_inlinetype_check;
  }

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which end up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dst positions are the same, or dst_pos is zero, so assume
    // a nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (strict_fp_requires_explicit_rounding) {
#ifdef IA32
    if (UseSSE < 1 && opr->is_single_fpu()) {
      LIR_Opr result = new_register(T_FLOAT);
      set_vreg_flag(result, must_start_in_memory);
      assert(opr->is_register(), "only a register can be spilled");
      assert(opr->value_type()->is_float(), "rounding only for floats available");
      __ roundfp(opr, LIR_OprFact::illegalOpr, result);
      return result;
    }
#else
    Unimplemented();
#endif // IA32
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At the exit of a basic block, we fill the register(s) and spill slots.
// At the entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.

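// Hedged illustration of the above: for bytecode like
//   x = p ? a : b;  use(x);
// the merge block has a phi for x, and each predecessor's epilog records
//   resolver.move(operand_for_instruction(a_or_b), operand_for_instruction(phi));
// (see move_to_phi() below), so the phi's virtual register holds the right
// value on entry to the merge block.
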
// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    if (phi->is_local()) {
      for (int i = 0; i < phi->operand_count(); i++) {
        Value op = phi->operand_at(i);
        if (op != NULL && op->type()->is_illegal()) {
          bailout("illegal phi operand");
        }
      }
    }
    Phi* cur_phi = cur_val->as_Phi();
    if (cur_phi != NULL && cur_phi->is_illegal()) {
      // Phi and local would need to get invalidated
      // (which is unexpected for Linear Scan).
      // But this case is very rare so we simply bail out.
      bailout("propagation of illegal phi");
      return;
    }
    LIR_Opr operand = cur_val->operand();
    if (operand->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      PhiResolver resolver(this);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg_num = _virtual_register_number;
  // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
  // a few extra registers before we really run out, which helps avoid tripping over assertions.
  if (vreg_num + 20 >= LIR_Opr::vreg_max) {
    bailout("out of virtual registers in LIR generator");
    if (vreg_num + 2 >= LIR_Opr::vreg_max) {
      // Wrap it around and continue until bailout really happens to avoid hitting assertions.
      _virtual_register_number = LIR_Opr::vreg_base;
      vreg_num = LIR_Opr::vreg_base;
    }
  }
  _virtual_register_number += 1;
  LIR_Opr vreg = LIR_OprFact::virtual_register(vreg_num, type);
  assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
  return vreg;
}


// Try to lock using the register in the hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets the result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets the result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
    case T_BYTE:
    case T_BOOLEAN:
      reg = rlock_byte(type);
      break;
    default:
      reg = rlock(x);
      break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   if (!phi->is_illegal()) { operand_for_instruction(phi); });

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
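// (A hedged summary of the three cases below: constants with a ValueStack
// are materialized eagerly with a patching move; multiply-used constants
// that cannot be inlined get a block-local register via load_constant();
// everything else stays a LIR constant operand and is materialized at use.)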
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Opr result = rlock_result(x, T_OBJECT);
  access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
                 reference, LIR_OprFact::intConst(referent_offset), result);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.
  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

void LIRGenerator::load_klass(LIR_Opr obj, LIR_Opr klass, CodeEmitInfo* null_check_info) {
  __ load_klass(obj, klass, null_check_info);
}

// Example: object.getClass ()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_ADDRESS);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Opr klass = new_register(T_METADATA);
  load_klass(rcvr.result(), klass, info);
  __ move_wide(new LIR_Address(klass, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
  // mirror = ((OopHandle)mirror)->resolve();
  access_load(IN_NATIVE, T_OBJECT,
              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
}

// java.lang.Class::isPrimitive()
void LIRGenerator::do_isPrimitive(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset(), T_ADDRESS), temp, info);
  __ cmp(lir_cond_notEqual, temp, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
}

// Example: Foo.class.getModifiers()
void LIRGenerator::do_getModifiers(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem receiver(x->argument_at(0), this);
  receiver.load_item();
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // While reading off the universal constant mirror is less efficient than doing
  // another branch and returning the constant answer, this branchless code runs into
  // much less risk of confusion for the C1 register allocator. The choice of the universe
  // object here is correct as long as it returns the same modifiers we would expect
  // from the primitive class itself. See the spec for Class.getModifiers, which provides
  // the typed array klasses with similar modifiers to their component types.
  Klass* univ_klass_obj = Universe::byteArrayKlassObj();
  assert(univ_klass_obj->modifier_flags() == (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC), "Sanity");
  LIR_Opr prim_klass = LIR_OprFact::metadataConst(univ_klass_obj);

  LIR_Opr recv_klass = new_register(T_METADATA);
  __ move(new LIR_Address(receiver.result(), java_lang_Class::klass_offset(), T_ADDRESS), recv_klass, info);

  // Check if this is a Java mirror of primitive type, and select the appropriate klass.
  LIR_Opr klass = new_register(T_METADATA);
  __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);

  // Get the answer.
  __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), result);
}

void LIRGenerator::do_getObjectSize(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIR_Opr result_reg = rlock_result(x);

  LIRItem value(x->argument_at(2), this);
  value.load_item();

  LIR_Opr klass = new_register(T_METADATA);
  load_klass(value.result(), klass, NULL);
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);

  LabelObj* L_done = new LabelObj();
  LabelObj* L_array = new LabelObj();

  __ cmp(lir_cond_lessEqual, layout, 0);
  __ branch(lir_cond_lessEqual, L_array->label());

  // Instance case: the layout helper gives us instance size almost directly,
  // but we need to mask out the _lh_instance_slow_path_bit.

  assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");

  LIR_Opr mask = load_immediate(~(jint) right_n_bits(LogBytesPerLong), T_INT);
  __ logical_and(layout, mask, layout);
  __ convert(Bytecodes::_i2l, layout, result_reg);

  __ branch(lir_cond_always, L_done->label());

  // Array case: size is round(header + element_size*arraylength).
  // Since arraylength is different for every array instance, we have to
  // compute the whole thing at runtime.

  __ branch_destination(L_array->label());

  int round_mask = MinObjAlignmentInBytes - 1;

  // Figure out header sizes first.
  LIR_Opr hss = load_immediate(Klass::_lh_header_size_shift, T_INT);
  LIR_Opr hsm = load_immediate(Klass::_lh_header_size_mask, T_INT);

  LIR_Opr header_size = new_register(T_INT);
  __ move(layout, header_size);
  LIR_Opr tmp = new_register(T_INT);
  __ unsigned_shift_right(header_size, hss, header_size, tmp);
  __ logical_and(header_size, hsm, header_size);
  __ add(header_size, LIR_OprFact::intConst(round_mask), header_size);

  // Figure out the array length in bytes
  assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
  LIR_Opr l2esm = load_immediate(Klass::_lh_log2_element_size_mask, T_INT);
  __ logical_and(layout, l2esm, layout);

  LIR_Opr length_int = new_register(T_INT);
  __ move(new LIR_Address(value.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), length_int);

#ifdef _LP64
  LIR_Opr length = new_register(T_LONG);
  __ convert(Bytecodes::_i2l, length_int, length);
#endif

  // Shift-left awkwardness. Normally it is just:
  //   __ shift_left(length, layout, length);
  // But C1 cannot perform shift_left with non-constant count, so we end up
  // doing the per-bit loop dance here. x86_32 also does not know how to shift
  // longs, so we have to act on ints.
  LabelObj* L_shift_loop = new LabelObj();
  LabelObj* L_shift_exit = new LabelObj();

  __ branch_destination(L_shift_loop->label());
  __ cmp(lir_cond_equal, layout, 0);
  __ branch(lir_cond_equal, L_shift_exit->label());

#ifdef _LP64
  __ shift_left(length, 1, length);
#else
  __ shift_left(length_int, 1, length_int);
#endif

  __ sub(layout, LIR_OprFact::intConst(1), layout);

  __ branch(lir_cond_always, L_shift_loop->label());
  __ branch_destination(L_shift_exit->label());

  // Mix all up, round, and push to the result.
#ifdef _LP64
  LIR_Opr header_size_long = new_register(T_LONG);
  __ convert(Bytecodes::_i2l, header_size, header_size_long);
  __ add(length, header_size_long, length);
  if (round_mask != 0) {
    LIR_Opr round_mask_opr = load_immediate(~(jlong)round_mask, T_LONG);
    __ logical_and(length, round_mask_opr, length);
  }
  __ move(length, result_reg);
#else
  __ add(length_int, header_size, length_int);
  if (round_mask != 0) {
    LIR_Opr round_mask_opr = load_immediate(~round_mask, T_INT);
    __ logical_and(length_int, round_mask_opr, length_int);
  }
  __ convert(Bytecodes::_i2l, length_int, result_reg);
#endif

  __ branch_destination(L_done->label());
}

void LIRGenerator::do_scopedValueCache(Intrinsic* x) {
  do_JavaThreadField(x, JavaThread::scopedValueCache_offset());
}

// Example: Thread.currentCarrierThread()
void LIRGenerator::do_currentCarrierThread(Intrinsic* x) {
  do_JavaThreadField(x, JavaThread::threadObj_offset());
}

void LIRGenerator::do_vthread(Intrinsic* x) {
  do_JavaThreadField(x, JavaThread::vthread_offset());
}

void LIRGenerator::do_JavaThreadField(Intrinsic* x, ByteSize offset) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr temp = new_register(T_ADDRESS);
  LIR_Opr reg = rlock_result(x);
  __ move(new LIR_Address(getThreadPointer(), in_bytes(offset), T_ADDRESS), temp);
  access_load(IN_NATIVE, T_OBJECT,
              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), reg);
}

void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}

Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling.  This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers.  Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      default:
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  if (!in_conditional_code()) {
    _constants.append(c);
    _reg_for_constants.append(result);
  }
  return result;
}

void LIRGenerator::set_in_conditional_code(bool v) {
  assert(v != _in_conditional_code, "must change state");
  _in_conditional_code = v;
}


//------------------------field access--------------------------------------

void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
  assert(obj.type()->tag() == objectTag, "invalid type");
  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
                                            obj, offset, cmp, val);
  set_result(x, result);
}

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
to all CPUs in 1614 // order. Store buffers on most chips allow reads & writes to reorder; the 1615 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of 1616 // memory barrier (i.e., it's not sufficient that the interpreter does not 1617 // reorder volatile references; the hardware also must not reorder them). 1618 // 1619 // According to the new Java Memory Model (JMM): 1620 // (1) All volatiles are serialized wrt each other. 1621 // ALSO reads & writes act as acquire & release, so: 1622 // (2) A read cannot let unrelated NON-volatile memory refs that happen after 1623 // the read float up to before the read. It's OK for non-volatile memory refs 1624 // that happen before the volatile read to float down below it. 1625 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs 1626 // that happen BEFORE the write float down to after the write. It's OK for 1627 // non-volatile memory refs that happen after the volatile write to float up 1628 // before it. 1629 // 1630 // We only put in barriers around volatile refs (they are expensive), not 1631 // _between_ memory refs (that would require us to track the flavor of the 1632 // previous memory refs). Requirements (2) and (3) require some barriers 1633 // before volatile stores and after volatile loads. These nearly cover 1634 // requirement (1) but miss the volatile-store-volatile-load case. This final 1635 // case is placed after volatile-stores although it could just as well go 1636 // before volatile-loads. 1637 1638 1639 void LIRGenerator::do_StoreField(StoreField* x) { 1640 bool needs_patching = x->needs_patching(); 1641 bool is_volatile = x->field()->is_volatile(); 1642 BasicType field_type = x->field_type(); 1643 1644 CodeEmitInfo* info = NULL; 1645 if (needs_patching) { 1646 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access"); 1647 info = state_for(x, x->state_before()); 1648 } else if (x->needs_null_check()) { 1649 NullCheck* nc = x->explicit_null_check(); 1650 if (nc == NULL) { 1651 info = state_for(x); 1652 } else { 1653 info = state_for(nc); 1654 } 1655 } 1656 1657 LIRItem object(x->obj(), this); 1658 LIRItem value(x->value(), this); 1659 1660 object.load_item(); 1661 1662 if (is_volatile || needs_patching) { 1663 // load item if field is volatile (fewer special cases for volatiles) 1664 // load item if field not initialized 1665 // load item if field not constant 1666 // because of code patching we cannot inline constants 1667 if (field_type == T_BYTE || field_type == T_BOOLEAN) { 1668 value.load_byte_item(); 1669 } else { 1670 value.load_item(); 1671 } 1672 } else { 1673 value.load_for_store(field_type); 1674 } 1675 1676 set_no_result(x); 1677 1678 #ifndef PRODUCT 1679 if (PrintNotLoaded && needs_patching) { 1680 tty->print_cr(" ###class not loaded at store_%s bci %d", 1681 x->is_static() ? "static" : "field", x->printable_bci()); 1682 } 1683 #endif 1684 1685 if (!inline_type_field_access_prolog(x)) { 1686 // Field store will always deopt due to unloaded field or holder klass 1687 return; 1688 } 1689 1690 if (x->needs_null_check() && 1691 (needs_patching || 1692 MacroAssembler::needs_explicit_null_check(x->offset()))) { 1693 // Emit an explicit null check because the offset is too large. 1694 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a 1695 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
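// (Background, roughly: an implicit null check relies on the faulting access landing
// in the protected page around address zero; once the field offset is larger than
// that guard region the fault is no longer guaranteed, which is what
// MacroAssembler::needs_explicit_null_check tests for above.)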
1696 __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching); 1697 } 1698 1699 DecoratorSet decorators = IN_HEAP; 1700 if (is_volatile) { 1701 decorators |= MO_SEQ_CST; 1702 } 1703 if (needs_patching) { 1704 decorators |= C1_NEEDS_PATCHING; 1705 } 1706 1707 access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()), 1708 value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info); 1709 } 1710 1711 // FIXME -- I can't find any other way to pass an address to access_load_at(). 1712 class TempResolvedAddress: public Instruction { 1713 public: 1714 TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) { 1715 set_operand(addr); 1716 } 1717 virtual void input_values_do(ValueVisitor*) {} 1718 virtual void visit(InstructionVisitor* v) {} 1719 virtual const char* name() const { return "TempResolvedAddress"; } 1720 }; 1721 1722 LIR_Opr LIRGenerator::get_and_load_element_address(LIRItem& array, LIRItem& index) { 1723 ciType* array_type = array.value()->declared_type(); 1724 ciFlatArrayKlass* flat_array_klass = array_type->as_flat_array_klass(); 1725 assert(flat_array_klass->is_loaded(), "must be"); 1726 1727 int array_header_size = flat_array_klass->array_header_in_bytes(); 1728 int shift = flat_array_klass->log2_element_size(); 1729 1730 #ifndef _LP64 1731 LIR_Opr index_op = new_register(T_INT); 1732 // FIXME -- on 32-bit, the shift below can overflow, so we need to check that 1733 // the top (shift+1) bits of index_op must be zero, or 1734 // else throw ArrayIndexOutOfBoundsException 1735 if (index.result()->is_constant()) { 1736 jint const_index = index.result()->as_jint(); 1737 __ move(LIR_OprFact::intConst(const_index << shift), index_op); 1738 } else { 1739 __ shift_left(index.result(), shift, index_op); // shift the index value into the fresh register 1740 } 1741 #else 1742 LIR_Opr index_op = new_register(T_LONG); 1743 if (index.result()->is_constant()) { 1744 jint const_index = index.result()->as_jint(); 1745 __ move(LIR_OprFact::longConst(const_index << shift), index_op); 1746 } else { 1747 __ convert(Bytecodes::_i2l, index.result(), index_op); 1748 // Need to shift manually, as LIR_Address can scale only up to 3.
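// (A LIR_Address scale encodes a shift of at most 3, i.e. an 8-byte element stride;
// a flat array with, say, 32-byte elements needs shift == 5, hence the explicit
// shift_left below. The 32-byte element size is only an illustration.)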
1749 __ shift_left(index_op, shift, index_op); 1750 } 1751 #endif 1752 1753 LIR_Opr elm_op = new_pointer_register(); 1754 LIR_Address* elm_address = generate_address(array.result(), index_op, 0, array_header_size, T_ADDRESS); 1755 __ leal(LIR_OprFact::address(elm_address), elm_op); 1756 return elm_op; 1757 } 1758 1759 void LIRGenerator::access_sub_element(LIRItem& array, LIRItem& index, LIR_Opr& result, ciField* field, int sub_offset) { 1760 assert(field != NULL, "Need a subelement type specified"); 1761 1762 // Find the starting address of the source (inside the array) 1763 LIR_Opr elm_op = get_and_load_element_address(array, index); 1764 1765 BasicType subelt_type = field->type()->basic_type(); 1766 TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(subelt_type), elm_op); 1767 LIRItem elm_item(elm_resolved_addr, this); 1768 1769 DecoratorSet decorators = IN_HEAP; 1770 access_load_at(decorators, subelt_type, 1771 elm_item, LIR_OprFact::intConst(sub_offset), result, 1772 NULL, NULL); 1773 1774 if (field->is_null_free()) { 1775 assert(field->type()->is_loaded(), "Must be"); 1776 assert(field->type()->is_inlinetype(), "Must be if loaded"); 1777 assert(field->type()->as_inline_klass()->is_initialized(), "Must be"); 1778 LabelObj* L_end = new LabelObj(); 1779 __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(NULL)); 1780 __ branch(lir_cond_notEqual, L_end->label()); 1781 set_in_conditional_code(true); 1782 Constant* default_value = new Constant(new InstanceConstant(field->type()->as_inline_klass()->default_instance())); 1783 if (default_value->is_pinned()) { 1784 __ move(LIR_OprFact::value_type(default_value->type()), result); 1785 } else { 1786 __ move(load_constant(default_value), result); 1787 } 1788 __ branch_destination(L_end->label()); 1789 set_in_conditional_code(false); 1790 } 1791 } 1792 1793 void LIRGenerator::access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item, 1794 ciField* field, int sub_offset) { 1795 assert(sub_offset == 0 || field != NULL, "Sanity check"); 1796 1797 // Find the starting address of the source (inside the array) 1798 LIR_Opr elm_op = get_and_load_element_address(array, index); 1799 1800 ciInlineKlass* elem_klass = NULL; 1801 if (field != NULL) { 1802 elem_klass = field->type()->as_inline_klass(); 1803 } else { 1804 elem_klass = array.value()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass(); 1805 } 1806 for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) { 1807 ciField* inner_field = elem_klass->nonstatic_field_at(i); 1808 assert(!inner_field->is_flattened(), "flattened fields must have been expanded"); 1809 int obj_offset = inner_field->offset(); 1810 int elm_offset = obj_offset - elem_klass->first_field_offset() + sub_offset; // object header is not stored in array. 1811 BasicType field_type = inner_field->type()->basic_type(); 1812 1813 // Types which are smaller than int are still passed in an int register. 
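// (For example, a T_SHORT sub-field travels through a T_INT virtual register: the
// switch below widens reg_type while field_type keeps the real access width.)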
1814 BasicType reg_type = field_type; 1815 switch (reg_type) { 1816 case T_BYTE: 1817 case T_BOOLEAN: 1818 case T_SHORT: 1819 case T_CHAR: 1820 reg_type = T_INT; 1821 break; 1822 default: 1823 break; 1824 } 1825 1826 LIR_Opr temp = new_register(reg_type); 1827 TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op); 1828 LIRItem elm_item(elm_resolved_addr, this); 1829 1830 DecoratorSet decorators = IN_HEAP; 1831 if (is_load) { 1832 access_load_at(decorators, field_type, 1833 elm_item, LIR_OprFact::intConst(elm_offset), temp, 1834 NULL, NULL); 1835 access_store_at(decorators, field_type, 1836 obj_item, LIR_OprFact::intConst(obj_offset), temp, 1837 NULL, NULL); 1838 } else { 1839 access_load_at(decorators, field_type, 1840 obj_item, LIR_OprFact::intConst(obj_offset), temp, 1841 NULL, NULL); 1842 access_store_at(decorators, field_type, 1843 elm_item, LIR_OprFact::intConst(elm_offset), temp, 1844 NULL, NULL); 1845 } 1846 } 1847 } 1848 1849 void LIRGenerator::check_flattened_array(LIR_Opr array, LIR_Opr value, CodeStub* slow_path) { 1850 LIR_Opr tmp = new_register(T_METADATA); 1851 __ check_flattened_array(array, value, tmp, slow_path); 1852 } 1853 1854 void LIRGenerator::check_null_free_array(LIRItem& array, LIRItem& value, CodeEmitInfo* info) { 1855 LabelObj* L_end = new LabelObj(); 1856 LIR_Opr tmp = new_register(T_METADATA); 1857 __ check_null_free_array(array.result(), tmp); 1858 __ branch(lir_cond_equal, L_end->label()); 1859 __ null_check(value.result(), info); 1860 __ branch_destination(L_end->label()); 1861 } 1862 1863 bool LIRGenerator::needs_flattened_array_store_check(StoreIndexed* x) { 1864 if (x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) { 1865 ciType* type = x->value()->declared_type(); 1866 if (type != NULL && type->is_klass()) { 1867 ciKlass* klass = type->as_klass(); 1868 if (!klass->can_be_inline_klass() || (klass->is_inlinetype() && !klass->as_inline_klass()->flatten_array())) { 1869 // This is known to be a non-flattened object. If the array is flattened, 1870 // it will be caught by the code generated by array_store_check(). 1871 return false; 1872 } 1873 } 1874 // We're not 100% sure, so let's do the flattened_array_store_check. 
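// (Typical case: the stored value is declared as plain Object, so it may still turn
// out at runtime to be an instance of an inline class whose arrays are flattened.)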
1875 return true; 1876 } 1877 return false; 1878 } 1879 1880 bool LIRGenerator::needs_null_free_array_store_check(StoreIndexed* x) { 1881 return x->elt_type() == T_OBJECT && x->array()->maybe_null_free_array(); 1882 } 1883 1884 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { 1885 assert(x->is_pinned(),""); 1886 assert(x->elt_type() != T_ARRAY, "never used"); 1887 bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array(); 1888 bool needs_range_check = x->compute_needs_range_check(); 1889 bool use_length = x->length() != NULL; 1890 bool obj_store = is_reference_type(x->elt_type()); 1891 bool needs_store_check = obj_store && !(is_loaded_flattened_array && x->is_exact_flattened_array_store()) && 1892 (x->value()->as_Constant() == NULL || 1893 !get_jobject_constant(x->value())->is_null_object()); 1894 1895 LIRItem array(x->array(), this); 1896 LIRItem index(x->index(), this); 1897 LIRItem value(x->value(), this); 1898 LIRItem length(this); 1899 1900 array.load_item(); 1901 index.load_nonconstant(); 1902 1903 if (use_length && needs_range_check) { 1904 length.set_instruction(x->length()); 1905 length.load_item(); 1906 } 1907 1908 if (needs_store_check || x->check_boolean() 1909 || is_loaded_flattened_array || needs_flattened_array_store_check(x) || needs_null_free_array_store_check(x)) { 1910 value.load_item(); 1911 } else { 1912 value.load_for_store(x->elt_type()); 1913 } 1914 1915 set_no_result(x); 1916 1917 // the CodeEmitInfo must be duplicated for each different 1918 // LIR-instruction because spilling can occur anywhere between two 1919 // instructions and so the debug information must be different 1920 CodeEmitInfo* range_check_info = state_for(x); 1921 CodeEmitInfo* null_check_info = NULL; 1922 if (x->needs_null_check()) { 1923 null_check_info = new CodeEmitInfo(range_check_info); 1924 } 1925 1926 if (GenerateRangeChecks && needs_range_check) { 1927 if (use_length) { 1928 __ cmp(lir_cond_belowEqual, length.result(), index.result()); 1929 __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result())); 1930 } else { 1931 array_range_check(array.result(), index.result(), null_check_info, range_check_info); 1932 // range_check also does the null check 1933 null_check_info = NULL; 1934 } 1935 } 1936 1937 if (x->should_profile()) { 1938 if (x->array()->is_loaded_flattened_array()) { 1939 // No need to profile a store to a flattened array of known type. This can happen if 1940 // the type only became known after optimizations (for example, after the PhiSimplifier). 
1941 x->set_should_profile(false); 1942 } else { 1943 ciMethodData* md = NULL; 1944 ciArrayLoadStoreData* load_store = NULL; 1945 profile_array_type(x, md, load_store); 1946 if (x->array()->maybe_null_free_array()) { 1947 profile_null_free_array(array, md, load_store); 1948 } 1949 profile_element_type(x->value(), md, load_store); 1950 } 1951 } 1952 1953 if (GenerateArrayStoreCheck && needs_store_check) { 1954 CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info); 1955 array_store_check(value.result(), array.result(), store_check_info, NULL, -1); 1956 } 1957 1958 if (is_loaded_flattened_array) { 1959 if (!x->value()->is_null_free()) { 1960 __ null_check(value.result(), new CodeEmitInfo(range_check_info)); 1961 } 1962 // If array element is an empty inline type, no need to copy anything 1963 if (!x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) { 1964 access_flattened_array(false, array, index, value); 1965 } 1966 } else { 1967 StoreFlattenedArrayStub* slow_path = NULL; 1968 1969 if (needs_flattened_array_store_check(x)) { 1970 // Check if we indeed have a flattened array 1971 index.load_item(); 1972 slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x, x->state_before())); 1973 check_flattened_array(array.result(), value.result(), slow_path); 1974 set_in_conditional_code(true); 1975 } else if (needs_null_free_array_store_check(x)) { 1976 CodeEmitInfo* info = new CodeEmitInfo(range_check_info); 1977 check_null_free_array(array, value, info); 1978 } 1979 1980 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 1981 if (x->check_boolean()) { 1982 decorators |= C1_MASK_BOOLEAN; 1983 } 1984 1985 access_store_at(decorators, x->elt_type(), array, index.result(), value.result(), 1986 NULL, null_check_info); 1987 if (slow_path != NULL) { 1988 __ branch_destination(slow_path->continuation()); 1989 set_in_conditional_code(false); 1990 } 1991 } 1992 } 1993 1994 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type, 1995 LIRItem& base, LIR_Opr offset, LIR_Opr result, 1996 CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) { 1997 decorators |= ACCESS_READ; 1998 LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info); 1999 if (access.is_raw()) { 2000 _barrier_set->BarrierSetC1::load_at(access, result); 2001 } else { 2002 _barrier_set->load_at(access, result); 2003 } 2004 } 2005 2006 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type, 2007 LIR_Opr addr, LIR_Opr result) { 2008 decorators |= ACCESS_READ; 2009 LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type); 2010 access.set_resolved_addr(addr); 2011 if (access.is_raw()) { 2012 _barrier_set->BarrierSetC1::load(access, result); 2013 } else { 2014 _barrier_set->load(access, result); 2015 } 2016 } 2017 2018 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type, 2019 LIRItem& base, LIR_Opr offset, LIR_Opr value, 2020 CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) { 2021 decorators |= ACCESS_WRITE; 2022 LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info); 2023 if (access.is_raw()) { 2024 _barrier_set->BarrierSetC1::store_at(access, value); 2025 } else { 2026 _barrier_set->store_at(access, value); 2027 } 2028 } 2029 2030 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type, 2031 LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) { 
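// Same raw-vs-barriered dispatch as access_load_at/access_store_at above: a raw
// access binds statically to BarrierSetC1 and skips GC-specific pre/post barriers.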
2032 decorators |= ACCESS_READ; 2033 decorators |= ACCESS_WRITE; 2034 // Atomic operations are SEQ_CST by default 2035 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0; 2036 LIRAccess access(this, decorators, base, offset, type); 2037 if (access.is_raw()) { 2038 return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value); 2039 } else { 2040 return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value); 2041 } 2042 } 2043 2044 LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type, 2045 LIRItem& base, LIRItem& offset, LIRItem& value) { 2046 decorators |= ACCESS_READ; 2047 decorators |= ACCESS_WRITE; 2048 // Atomic operations are SEQ_CST by default 2049 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0; 2050 LIRAccess access(this, decorators, base, offset, type); 2051 if (access.is_raw()) { 2052 return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value); 2053 } else { 2054 return _barrier_set->atomic_xchg_at(access, value); 2055 } 2056 } 2057 2058 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type, 2059 LIRItem& base, LIRItem& offset, LIRItem& value) { 2060 decorators |= ACCESS_READ; 2061 decorators |= ACCESS_WRITE; 2062 // Atomic operations are SEQ_CST by default 2063 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0; 2064 LIRAccess access(this, decorators, base, offset, type); 2065 if (access.is_raw()) { 2066 return _barrier_set->BarrierSetC1::atomic_add_at(access, value); 2067 } else { 2068 return _barrier_set->atomic_add_at(access, value); 2069 } 2070 } 2071 2072 bool LIRGenerator::inline_type_field_access_prolog(AccessField* x) { 2073 ciField* field = x->field(); 2074 assert(!field->is_flattened(), "Flattened field access should have been expanded"); 2075 if (!field->is_null_free()) { 2076 return true; // Not an inline type field 2077 } 2078 // Deoptimize if the access is non-static and requires patching (holder not loaded 2079 // or not accessible) because then we only have partial field information and the 2080 // field could be flattened (see ciField constructor). 2081 bool could_be_flat = !x->is_static() && x->needs_patching(); 2082 // Deoptimize if we load from a static field with an uninitialized type because we 2083 // need to throw an exception if initialization of the type failed. 
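// (Illustrative scenario: the static initializer of inline class V threw, so a later
// read of a static field of type V must throw in the interpreter rather than
// silently produce a compiled-in default value.)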
2084 bool not_initialized = x->is_static() && x->as_LoadField() != NULL && 2085 !field->type()->as_instance_klass()->is_initialized(); 2086 if (could_be_flat || not_initialized) { 2087 CodeEmitInfo* info = state_for(x, x->state_before()); 2088 CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info), 2089 Deoptimization::Reason_unloaded, 2090 Deoptimization::Action_make_not_entrant); 2091 __ jump(stub); 2092 return false; 2093 } 2094 return true; 2095 } 2096 2097 void LIRGenerator::do_LoadField(LoadField* x) { 2098 bool needs_patching = x->needs_patching(); 2099 bool is_volatile = x->field()->is_volatile(); 2100 BasicType field_type = x->field_type(); 2101 2102 CodeEmitInfo* info = NULL; 2103 if (needs_patching) { 2104 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access"); 2105 info = state_for(x, x->state_before()); 2106 } else if (x->needs_null_check()) { 2107 NullCheck* nc = x->explicit_null_check(); 2108 if (nc == NULL) { 2109 info = state_for(x); 2110 } else { 2111 info = state_for(nc); 2112 } 2113 } 2114 2115 LIRItem object(x->obj(), this); 2116 2117 object.load_item(); 2118 2119 #ifndef PRODUCT 2120 if (PrintNotLoaded && needs_patching) { 2121 tty->print_cr(" ###class not loaded at load_%s bci %d", 2122 x->is_static() ? "static" : "field", x->printable_bci()); 2123 } 2124 #endif 2125 2126 if (!inline_type_field_access_prolog(x)) { 2127 // Field load will always deopt due to unloaded field or holder klass 2128 LIR_Opr result = rlock_result(x, field_type); 2129 __ move(LIR_OprFact::oopConst(NULL), result); 2130 return; 2131 } 2132 2133 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception(); 2134 if (x->needs_null_check() && 2135 (needs_patching || 2136 MacroAssembler::needs_explicit_null_check(x->offset()) || 2137 stress_deopt)) { 2138 LIR_Opr obj = object.result(); 2139 if (stress_deopt) { 2140 obj = new_register(T_OBJECT); 2141 __ move(LIR_OprFact::oopConst(NULL), obj); 2142 } 2143 // Emit an explicit null check because the offset is too large. 2144 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a 2145 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code. 2146 __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching); 2147 } 2148 2149 DecoratorSet decorators = IN_HEAP; 2150 if (is_volatile) { 2151 decorators |= MO_SEQ_CST; 2152 } 2153 if (needs_patching) { 2154 decorators |= C1_NEEDS_PATCHING; 2155 } 2156 2157 LIR_Opr result = rlock_result(x, field_type); 2158 access_load_at(decorators, field_type, 2159 object, LIR_OprFact::intConst(x->offset()), result, 2160 info ? new CodeEmitInfo(info) : NULL, info); 2161 2162 ciField* field = x->field(); 2163 if (field->is_null_free()) { 2164 // Load from non-flattened inline type field requires 2165 // a null check to replace null with the default value. 2166 ciInstanceKlass* holder = field->holder(); 2167 if (field->is_static() && holder->is_loaded()) { 2168 ciObject* val = holder->java_mirror()->field_value(field).as_object(); 2169 if (!val->is_null_object()) { 2170 // Static field is initialized, we don't need to perform a null check. 
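// (The mirror already holds a non-null instance for this field, so the loaded
// result can never be null and no default-value substitution is needed.)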
2171 return; 2172 } 2173 } 2174 ciInlineKlass* inline_klass = field->type()->as_inline_klass(); 2175 if (inline_klass->is_initialized()) { 2176 LabelObj* L_end = new LabelObj(); 2177 __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(NULL)); 2178 __ branch(lir_cond_notEqual, L_end->label()); 2179 set_in_conditional_code(true); 2180 Constant* default_value = new Constant(new InstanceConstant(inline_klass->default_instance())); 2181 if (default_value->is_pinned()) { 2182 __ move(LIR_OprFact::value_type(default_value->type()), result); 2183 } else { 2184 __ move(load_constant(default_value), result); 2185 } 2186 __ branch_destination(L_end->label()); 2187 set_in_conditional_code(false); 2188 } else { 2189 info = state_for(x, x->state_before()); 2190 __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL)); 2191 __ branch(lir_cond_equal, new DeoptimizeStub(info, Deoptimization::Reason_uninitialized, 2192 Deoptimization::Action_make_not_entrant)); 2193 } 2194 } 2195 } 2196 2197 // int/long jdk.internal.util.Preconditions.checkIndex 2198 void LIRGenerator::do_PreconditionsCheckIndex(Intrinsic* x, BasicType type) { 2199 assert(x->number_of_arguments() == 3, "wrong type"); 2200 LIRItem index(x->argument_at(0), this); 2201 LIRItem length(x->argument_at(1), this); 2202 LIRItem oobef(x->argument_at(2), this); 2203 2204 index.load_item(); 2205 length.load_item(); 2206 oobef.load_item(); 2207 2208 LIR_Opr result = rlock_result(x); 2209 // x->state() is created from copy_state_for_exception; it does not contain the arguments, 2210 // so we must prepare them before entering interpreter mode upon deoptimization. 2211 ValueStack* state = x->state(); 2212 for (int i = 0; i < x->number_of_arguments(); i++) { 2213 Value arg = x->argument_at(i); 2214 state->push(arg->type(), arg); 2215 } 2216 CodeEmitInfo* info = state_for(x, state); 2217 2218 LIR_Opr len = length.result(); 2219 LIR_Opr zero; 2220 if (type == T_INT) { 2221 zero = LIR_OprFact::intConst(0); 2222 if (length.result()->is_constant()) { 2223 len = LIR_OprFact::intConst(length.result()->as_jint()); 2224 } 2225 } else { 2226 assert(type == T_LONG, "sanity check"); 2227 zero = LIR_OprFact::longConst(0); 2228 if (length.result()->is_constant()) { 2229 len = LIR_OprFact::longConst(length.result()->as_jlong()); 2230 } 2231 } 2232 // C1 cannot handle comparing an index with a constant value when the condition 2233 // is neither lir_cond_equal nor lir_cond_notEqual; see LIR_Assembler::comp_op. 2234 LIR_Opr zero_reg = new_register(type); 2235 __ move(zero, zero_reg); 2236 #if defined(X86) && !defined(_LP64) 2237 // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
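// (This is why index_copy is re-materialized before each of the two comparisons below.)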
2238 LIR_Opr index_copy = new_register(index.type()); 2239 // index >= 0 2240 __ move(index.result(), index_copy); 2241 __ cmp(lir_cond_less, index_copy, zero_reg); 2242 __ branch(lir_cond_less, new DeoptimizeStub(info, Deoptimization::Reason_range_check, 2243 Deoptimization::Action_make_not_entrant)); 2244 // index < length 2245 __ move(index.result(), index_copy); 2246 __ cmp(lir_cond_greaterEqual, index_copy, len); 2247 __ branch(lir_cond_greaterEqual, new DeoptimizeStub(info, Deoptimization::Reason_range_check, 2248 Deoptimization::Action_make_not_entrant)); 2249 #else 2250 // index >= 0 2251 __ cmp(lir_cond_less, index.result(), zero_reg); 2252 __ branch(lir_cond_less, new DeoptimizeStub(info, Deoptimization::Reason_range_check, 2253 Deoptimization::Action_make_not_entrant)); 2254 // index < length 2255 __ cmp(lir_cond_greaterEqual, index.result(), len); 2256 __ branch(lir_cond_greaterEqual, new DeoptimizeStub(info, Deoptimization::Reason_range_check, 2257 Deoptimization::Action_make_not_entrant)); 2258 #endif 2259 __ move(index.result(), result); 2260 } 2261 2262 //------------------------array access-------------------------------------- 2263 2264 2265 void LIRGenerator::do_ArrayLength(ArrayLength* x) { 2266 LIRItem array(x->array(), this); 2267 array.load_item(); 2268 LIR_Opr reg = rlock_result(x); 2269 2270 CodeEmitInfo* info = NULL; 2271 if (x->needs_null_check()) { 2272 NullCheck* nc = x->explicit_null_check(); 2273 if (nc == NULL) { 2274 info = state_for(x); 2275 } else { 2276 info = state_for(nc); 2277 } 2278 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) { 2279 LIR_Opr obj = new_register(T_OBJECT); 2280 __ move(LIR_OprFact::oopConst(NULL), obj); 2281 __ null_check(obj, new CodeEmitInfo(info)); 2282 } 2283 } 2284 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none); 2285 } 2286 2287 2288 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) { 2289 bool use_length = x->length() != NULL; 2290 LIRItem array(x->array(), this); 2291 LIRItem index(x->index(), this); 2292 LIRItem length(this); 2293 bool needs_range_check = x->compute_needs_range_check(); 2294 2295 if (use_length && needs_range_check) { 2296 length.set_instruction(x->length()); 2297 length.load_item(); 2298 } 2299 2300 array.load_item(); 2301 if (index.is_constant() && can_inline_as_constant(x->index())) { 2302 // let it be a constant 2303 index.dont_load_item(); 2304 } else { 2305 index.load_item(); 2306 } 2307 2308 CodeEmitInfo* range_check_info = state_for(x); 2309 CodeEmitInfo* null_check_info = NULL; 2310 if (x->needs_null_check()) { 2311 NullCheck* nc = x->explicit_null_check(); 2312 if (nc != NULL) { 2313 null_check_info = state_for(nc); 2314 } else { 2315 null_check_info = range_check_info; 2316 } 2317 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) { 2318 LIR_Opr obj = new_register(T_OBJECT); 2319 __ move(LIR_OprFact::oopConst(NULL), obj); 2320 __ null_check(obj, new CodeEmitInfo(null_check_info)); 2321 } 2322 } 2323 2324 if (GenerateRangeChecks && needs_range_check) { 2325 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) { 2326 __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result())); 2327 } else if (use_length) { 2328 // TODO: use a (modified) version of array_range_check that does not require a 2329 // constant length to be loaded to a register 2330 __ cmp(lir_cond_belowEqual, length.result(), index.result()); 2331 __ 
branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result())); 2332 } else { 2333 array_range_check(array.result(), index.result(), null_check_info, range_check_info); 2334 // The range check performs the null check, so clear it out for the load 2335 null_check_info = NULL; 2336 } 2337 } 2338 2339 ciMethodData* md = NULL; 2340 ciArrayLoadStoreData* load_store = NULL; 2341 if (x->should_profile()) { 2342 if (x->array()->is_loaded_flattened_array()) { 2343 // No need to profile a load from a flattened array of known type. This can happen if 2344 // the type only became known after optimizations (for example, after the PhiSimplifier). 2345 x->set_should_profile(false); 2346 } else { 2347 profile_array_type(x, md, load_store); 2348 } 2349 } 2350 2351 Value element = NULL; 2352 if (x->vt() != NULL) { 2353 assert(x->array()->is_loaded_flattened_array(), "must be"); 2354 // Find the destination address (of the NewInlineTypeInstance). 2355 LIRItem obj_item(x->vt(), this); 2356 2357 access_flattened_array(true, array, index, obj_item, 2358 x->delayed() == NULL ? NULL : x->delayed()->field(), 2359 x->delayed() == NULL ? 0 : x->delayed()->offset()); 2360 set_no_result(x); 2361 } else if (x->delayed() != NULL) { 2362 assert(x->array()->is_loaded_flattened_array(), "must be"); 2363 LIR_Opr result = rlock_result(x, x->delayed()->field()->type()->basic_type()); 2364 access_sub_element(array, index, result, x->delayed()->field(), x->delayed()->offset()); 2365 } else if (x->array() != NULL && x->array()->is_loaded_flattened_array() && 2366 x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_initialized() && 2367 x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass()->is_empty()) { 2368 // Load the default instance instead of reading the element 2369 ciInlineKlass* elem_klass = x->array()->declared_type()->as_flat_array_klass()->element_klass()->as_inline_klass(); 2370 LIR_Opr result = rlock_result(x, x->elt_type()); 2371 assert(elem_klass->is_initialized(), "Must be"); 2372 Constant* default_value = new Constant(new InstanceConstant(elem_klass->default_instance())); 2373 if (default_value->is_pinned()) { 2374 __ move(LIR_OprFact::value_type(default_value->type()), result); 2375 } else { 2376 __ move(load_constant(default_value), result); 2377 } 2378 } else { 2379 LIR_Opr result = rlock_result(x, x->elt_type()); 2380 LoadFlattenedArrayStub* slow_path = NULL; 2381 2382 if (x->should_profile() && x->array()->maybe_null_free_array()) { 2383 profile_null_free_array(array, md, load_store); 2384 } 2385 2386 if (x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) { 2387 assert(x->delayed() == NULL, "Delayed LoadIndexed only applies to loaded_flattened_arrays"); 2388 index.load_item(); 2389 // if we are loading from a flattened array, load it using a runtime call 2390 slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x, x->state_before())); 2391 check_flattened_array(array.result(), LIR_OprFact::illegalOpr, slow_path); 2392 set_in_conditional_code(true); 2393 } 2394 2395 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2396 access_load_at(decorators, x->elt_type(), 2397 array, index.result(), result, 2398 NULL, null_check_info); 2399 2400 if (slow_path != NULL) { 2401 __ branch_destination(slow_path->continuation()); 2402 set_in_conditional_code(false); 2403 } 2404 2405 element = x; 2406 } 2407 2408 if (x->should_profile()) { 2409 profile_element_type(element, md,
load_store); 2410 } 2411 } 2412 2413 void LIRGenerator::do_Deoptimize(Deoptimize* x) { 2414 // This happens only when a class X uses the withfield/aconst_init bytecode 2415 // to refer to an inline class V, where V has not yet been loaded/resolved. 2416 // This is not a common case. Let's just deoptimize. 2417 CodeEmitInfo* info = state_for(x, x->state_before()); 2418 CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info), 2419 Deoptimization::Reason_unloaded, 2420 Deoptimization::Action_make_not_entrant); 2421 __ jump(stub); 2422 LIR_Opr reg = rlock_result(x, T_OBJECT); 2423 __ move(LIR_OprFact::oopConst(NULL), reg); 2424 } 2425 2426 void LIRGenerator::do_NullCheck(NullCheck* x) { 2427 if (x->can_trap()) { 2428 LIRItem value(x->obj(), this); 2429 value.load_item(); 2430 CodeEmitInfo* info = state_for(x); 2431 __ null_check(value.result(), info); 2432 } 2433 } 2434 2435 2436 void LIRGenerator::do_TypeCast(TypeCast* x) { 2437 LIRItem value(x->obj(), this); 2438 value.load_item(); 2439 // the result is the same as from the node we are casting 2440 set_result(x, value.result()); 2441 } 2442 2443 2444 void LIRGenerator::do_Throw(Throw* x) { 2445 LIRItem exception(x->exception(), this); 2446 exception.load_item(); 2447 set_no_result(x); 2448 LIR_Opr exception_opr = exception.result(); 2449 CodeEmitInfo* info = state_for(x, x->state()); 2450 2451 #ifndef PRODUCT 2452 if (PrintC1Statistics) { 2453 increment_counter(Runtime1::throw_count_address(), T_INT); 2454 } 2455 #endif 2456 2457 // check if the instruction has an xhandler in any of the nested scopes 2458 bool unwind = false; 2459 if (info->exception_handlers()->length() == 0) { 2460 // this throw is not inside an xhandler 2461 unwind = true; 2462 } else { 2463 // get some idea of the throw type 2464 bool type_is_exact = true; 2465 ciType* throw_type = x->exception()->exact_type(); 2466 if (throw_type == NULL) { 2467 type_is_exact = false; 2468 throw_type = x->exception()->declared_type(); 2469 } 2470 if (throw_type != NULL && throw_type->is_instance_klass()) { 2471 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type; 2472 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact); 2473 } 2474 } 2475 2476 // do null check before moving exception oop into fixed register 2477 // to avoid a fixed interval with an oop during the null check. 2478 // Use a copy of the CodeEmitInfo because debug information is 2479 // different for null_check and throw. 2480 if (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL) { 2481 // if the exception object wasn't created using new then it might be null. 
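// (For example "throw t;" where t is a parameter or a field load; NewInstance and
// ExceptionObject are the only producers known to be non-null here.)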
2482 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci()))); 2483 } 2484 2485 if (compilation()->env()->jvmti_can_post_on_exceptions()) { 2486 // we need to go through the exception lookup path to get JVMTI 2487 // notification done 2488 unwind = false; 2489 } 2490 2491 // move exception oop into fixed register 2492 __ move(exception_opr, exceptionOopOpr()); 2493 2494 if (unwind) { 2495 __ unwind_exception(exceptionOopOpr()); 2496 } else { 2497 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info); 2498 } 2499 } 2500 2501 2502 void LIRGenerator::do_RoundFP(RoundFP* x) { 2503 assert(strict_fp_requires_explicit_rounding, "not required"); 2504 2505 LIRItem input(x->input(), this); 2506 input.load_item(); 2507 LIR_Opr input_opr = input.result(); 2508 assert(input_opr->is_register(), "why round if value is not in a register?"); 2509 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value"); 2510 if (input_opr->is_single_fpu()) { 2511 set_result(x, round_item(input_opr)); // This code path not currently taken 2512 } else { 2513 LIR_Opr result = new_register(T_DOUBLE); 2514 set_vreg_flag(result, must_start_in_memory); 2515 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result); 2516 set_result(x, result); 2517 } 2518 } 2519 2520 2521 void LIRGenerator::do_UnsafeGet(UnsafeGet* x) { 2522 BasicType type = x->basic_type(); 2523 LIRItem src(x->object(), this); 2524 LIRItem off(x->offset(), this); 2525 2526 off.load_item(); 2527 src.load_item(); 2528 2529 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS; 2530 2531 if (x->is_volatile()) { 2532 decorators |= MO_SEQ_CST; 2533 } 2534 if (type == T_BOOLEAN) { 2535 decorators |= C1_MASK_BOOLEAN; 2536 } 2537 if (is_reference_type(type)) { 2538 decorators |= ON_UNKNOWN_OOP_REF; 2539 } 2540 2541 LIR_Opr result = rlock_result(x, type); 2542 if (!x->is_raw()) { 2543 access_load_at(decorators, type, src, off.result(), result); 2544 } else { 2545 // Currently it is only used in GraphBuilder::setup_osr_entry_block. 2546 // It reads the value from [src + offset] directly. 
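// (Rough shape of the emitted access for a primitive type: a single move from the
// address [src + offset], with no decorators and no barrier-set dispatch.)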
2547 #ifdef _LP64 2548 LIR_Opr offset = new_register(T_LONG); 2549 __ convert(Bytecodes::_i2l, off.result(), offset); 2550 #else 2551 LIR_Opr offset = off.result(); 2552 #endif 2553 LIR_Address* addr = new LIR_Address(src.result(), offset, type); 2554 if (is_reference_type(type)) { 2555 __ move_wide(addr, result); 2556 } else { 2557 __ move(addr, result); 2558 } 2559 } 2560 } 2561 2562 2563 void LIRGenerator::do_UnsafePut(UnsafePut* x) { 2564 BasicType type = x->basic_type(); 2565 LIRItem src(x->object(), this); 2566 LIRItem off(x->offset(), this); 2567 LIRItem data(x->value(), this); 2568 2569 src.load_item(); 2570 if (type == T_BOOLEAN || type == T_BYTE) { 2571 data.load_byte_item(); 2572 } else { 2573 data.load_item(); 2574 } 2575 off.load_item(); 2576 2577 set_no_result(x); 2578 2579 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS; 2580 if (is_reference_type(type)) { 2581 decorators |= ON_UNKNOWN_OOP_REF; 2582 } 2583 if (x->is_volatile()) { 2584 decorators |= MO_SEQ_CST; 2585 } 2586 access_store_at(decorators, type, src, off.result(), data.result()); 2587 } 2588 2589 void LIRGenerator::do_UnsafeGetAndSet(UnsafeGetAndSet* x) { 2590 BasicType type = x->basic_type(); 2591 LIRItem src(x->object(), this); 2592 LIRItem off(x->offset(), this); 2593 LIRItem value(x->value(), this); 2594 2595 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST; 2596 2597 if (is_reference_type(type)) { 2598 decorators |= ON_UNKNOWN_OOP_REF; 2599 } 2600 2601 LIR_Opr result; 2602 if (x->is_add()) { 2603 result = access_atomic_add_at(decorators, type, src, off, value); 2604 } else { 2605 result = access_atomic_xchg_at(decorators, type, src, off, value); 2606 } 2607 set_result(x, result); 2608 } 2609 2610 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) { 2611 int lng = x->length(); 2612 2613 for (int i = 0; i < lng; i++) { 2614 C1SwitchRange* one_range = x->at(i); 2615 int low_key = one_range->low_key(); 2616 int high_key = one_range->high_key(); 2617 BlockBegin* dest = one_range->sux(); 2618 if (low_key == high_key) { 2619 __ cmp(lir_cond_equal, value, low_key); 2620 __ branch(lir_cond_equal, dest); 2621 } else if (high_key - low_key == 1) { 2622 __ cmp(lir_cond_equal, value, low_key); 2623 __ branch(lir_cond_equal, dest); 2624 __ cmp(lir_cond_equal, value, high_key); 2625 __ branch(lir_cond_equal, dest); 2626 } else { 2627 LabelObj* L = new LabelObj(); 2628 __ cmp(lir_cond_less, value, low_key); 2629 __ branch(lir_cond_less, L->label()); 2630 __ cmp(lir_cond_lessEqual, value, high_key); 2631 __ branch(lir_cond_lessEqual, dest); 2632 __ branch_destination(L->label()); 2633 } 2634 } 2635 __ jump(default_sux); 2636 } 2637 2638 2639 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) { 2640 SwitchRangeList* res = new SwitchRangeList(); 2641 int len = x->length(); 2642 if (len > 0) { 2643 BlockBegin* sux = x->sux_at(0); 2644 int key = x->lo_key(); 2645 BlockBegin* default_sux = x->default_sux(); 2646 C1SwitchRange* range = new C1SwitchRange(key, sux); 2647 for (int i = 0; i < len; i++, key++) { 2648 BlockBegin* new_sux = x->sux_at(i); 2649 if (sux == new_sux) { 2650 // still in same range 2651 range->set_high_key(key); 2652 } else { 2653 // skip tests which explicitly dispatch to the default 2654 if (sux != default_sux) { 2655 res->append(range); 2656 } 2657 range = new C1SwitchRange(key, new_sux); 2658 } 2659 sux = new_sux; 2660 } 2661 if (res->length() == 0 || res->last() != range) res->append(range); 2662 } 2663 return res; 2664 } 
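// Illustrative example: a tableswitch with lo_key == 0 and successors
//   [B1, B1, B1, B2, default, B3]
// is compressed by the loop above into the ranges
//   [0,2] -> B1, [3,3] -> B2, [5,5] -> B3,
// dropping the explicit dispatch to the default successor.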
2665 2666 2667 // we expect the keys to be sorted by increasing value 2668 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) { 2669 SwitchRangeList* res = new SwitchRangeList(); 2670 int len = x->length(); 2671 if (len > 0) { 2672 BlockBegin* default_sux = x->default_sux(); 2673 int key = x->key_at(0); 2674 BlockBegin* sux = x->sux_at(0); 2675 C1SwitchRange* range = new C1SwitchRange(key, sux); 2676 for (int i = 1; i < len; i++) { 2677 int new_key = x->key_at(i); 2678 BlockBegin* new_sux = x->sux_at(i); 2679 if (key+1 == new_key && sux == new_sux) { 2680 // still in same range 2681 range->set_high_key(new_key); 2682 } else { 2683 // skip tests which explicitly dispatch to the default 2684 if (range->sux() != default_sux) { 2685 res->append(range); 2686 } 2687 range = new C1SwitchRange(new_key, new_sux); 2688 } 2689 key = new_key; 2690 sux = new_sux; 2691 } 2692 if (res->length() == 0 || res->last() != range) res->append(range); 2693 } 2694 return res; 2695 } 2696 2697 2698 void LIRGenerator::do_TableSwitch(TableSwitch* x) { 2699 LIRItem tag(x->tag(), this); 2700 tag.load_item(); 2701 set_no_result(x); 2702 2703 if (x->is_safepoint()) { 2704 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before())); 2705 } 2706 2707 // move values into phi locations 2708 move_to_phi(x->state()); 2709 2710 int lo_key = x->lo_key(); 2711 int len = x->length(); 2712 assert(lo_key <= (lo_key + (len - 1)), "integer overflow"); 2713 LIR_Opr value = tag.result(); 2714 2715 if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) { 2716 ciMethod* method = x->state()->scope()->method(); 2717 ciMethodData* md = method->method_data_or_null(); 2718 assert(md != NULL, "Sanity"); 2719 ciProfileData* data = md->bci_to_data(x->state()->bci()); 2720 assert(data != NULL, "must have profiling data"); 2721 assert(data->is_MultiBranchData(), "bad profile data?"); 2722 int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset()); 2723 LIR_Opr md_reg = new_register(T_METADATA); 2724 __ metadata2reg(md->constant_encoding(), md_reg); 2725 LIR_Opr data_offset_reg = new_pointer_register(); 2726 LIR_Opr tmp_reg = new_pointer_register(); 2727 2728 __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg); 2729 for (int i = 0; i < len; i++) { 2730 int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i)); 2731 __ cmp(lir_cond_equal, value, i + lo_key); 2732 __ move(data_offset_reg, tmp_reg); 2733 __ cmove(lir_cond_equal, 2734 LIR_OprFact::intptrConst(count_offset), 2735 tmp_reg, 2736 data_offset_reg, T_INT); 2737 } 2738 2739 LIR_Opr data_reg = new_pointer_register(); 2740 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type()); 2741 __ move(data_addr, data_reg); 2742 __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg); 2743 __ move(data_reg, data_addr); 2744 } 2745 2746 if (UseTableRanges) { 2747 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux()); 2748 } else { 2749 for (int i = 0; i < len; i++) { 2750 __ cmp(lir_cond_equal, value, i + lo_key); 2751 __ branch(lir_cond_equal, x->sux_at(i)); 2752 } 2753 __ jump(x->default_sux()); 2754 } 2755 } 2756 2757 2758 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) { 2759 LIRItem tag(x->tag(), this); 2760 tag.load_item(); 2761 set_no_result(x); 2762 2763 if (x->is_safepoint()) { 2764 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before())); 2765 } 2766 2767 // move values into phi 
locations 2768 move_to_phi(x->state()); 2769 2770 LIR_Opr value = tag.result(); 2771 int len = x->length(); 2772 2773 if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) { 2774 ciMethod* method = x->state()->scope()->method(); 2775 ciMethodData* md = method->method_data_or_null(); 2776 assert(md != NULL, "Sanity"); 2777 ciProfileData* data = md->bci_to_data(x->state()->bci()); 2778 assert(data != NULL, "must have profiling data"); 2779 assert(data->is_MultiBranchData(), "bad profile data?"); 2780 int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset()); 2781 LIR_Opr md_reg = new_register(T_METADATA); 2782 __ metadata2reg(md->constant_encoding(), md_reg); 2783 LIR_Opr data_offset_reg = new_pointer_register(); 2784 LIR_Opr tmp_reg = new_pointer_register(); 2785 2786 __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg); 2787 for (int i = 0; i < len; i++) { 2788 int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i)); 2789 __ cmp(lir_cond_equal, value, x->key_at(i)); 2790 __ move(data_offset_reg, tmp_reg); 2791 __ cmove(lir_cond_equal, 2792 LIR_OprFact::intptrConst(count_offset), 2793 tmp_reg, 2794 data_offset_reg, T_INT); 2795 } 2796 2797 LIR_Opr data_reg = new_pointer_register(); 2798 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type()); 2799 __ move(data_addr, data_reg); 2800 __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg); 2801 __ move(data_reg, data_addr); 2802 } 2803 2804 if (UseTableRanges) { 2805 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux()); 2806 } else { 2807 int len = x->length(); 2808 for (int i = 0; i < len; i++) { 2809 __ cmp(lir_cond_equal, value, x->key_at(i)); 2810 __ branch(lir_cond_equal, x->sux_at(i)); 2811 } 2812 __ jump(x->default_sux()); 2813 } 2814 } 2815 2816 2817 void LIRGenerator::do_Goto(Goto* x) { 2818 set_no_result(x); 2819 2820 if (block()->next()->as_OsrEntry()) { 2821 // need to free up storage used for OSR entry point 2822 LIR_Opr osrBuffer = block()->next()->operand(); 2823 BasicTypeList signature; 2824 signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer 2825 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 2826 __ move(osrBuffer, cc->args()->at(0)); 2827 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end), 2828 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args()); 2829 } 2830 2831 if (x->is_safepoint()) { 2832 ValueStack* state = x->state_before() ? x->state_before() : x->state(); 2833 2834 // increment backedge counter if needed 2835 CodeEmitInfo* info = state_for(x, state); 2836 increment_backedge_counter(info, x->profiled_bci()); 2837 CodeEmitInfo* safepoint_info = state_for(x, state); 2838 __ safepoint(safepoint_poll_register(), safepoint_info); 2839 } 2840 2841 // Gotos can be folded Ifs, handle this case. 
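// (A two-way branch canonicalized into a Goto still carries its BranchData profile;
// direction() below distinguishes taken/not_taken from a plain JumpData jump.)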
2842 if (x->should_profile()) { 2843 ciMethod* method = x->profiled_method(); 2844 assert(method != NULL, "method should be set if branch is profiled"); 2845 ciMethodData* md = method->method_data_or_null(); 2846 assert(md != NULL, "Sanity"); 2847 ciProfileData* data = md->bci_to_data(x->profiled_bci()); 2848 assert(data != NULL, "must have profiling data"); 2849 int offset; 2850 if (x->direction() == Goto::taken) { 2851 assert(data->is_BranchData(), "need BranchData for two-way branches"); 2852 offset = md->byte_offset_of_slot(data, BranchData::taken_offset()); 2853 } else if (x->direction() == Goto::not_taken) { 2854 assert(data->is_BranchData(), "need BranchData for two-way branches"); 2855 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset()); 2856 } else { 2857 assert(data->is_JumpData(), "need JumpData for branches"); 2858 offset = md->byte_offset_of_slot(data, JumpData::taken_offset()); 2859 } 2860 LIR_Opr md_reg = new_register(T_METADATA); 2861 __ metadata2reg(md->constant_encoding(), md_reg); 2862 2863 increment_counter(new LIR_Address(md_reg, offset, 2864 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment); 2865 } 2866 2867 // emit phi-instruction moves after the safepoint since this simplifies 2868 // describing the state at the safepoint. 2869 move_to_phi(x->state()); 2870 2871 __ jump(x->default_sux()); 2872 } 2873 2874 /** 2875 * Emit profiling code if needed for arguments, parameters, return value types 2876 * 2877 * @param md MDO the code will update at runtime 2878 * @param md_base_offset common offset in the MDO for this profile and subsequent ones 2879 * @param md_offset offset in the MDO (on top of md_base_offset) for this profile 2880 * @param profiled_k current profile 2881 * @param obj IR node for the object to be profiled 2882 * @param mdp register to hold the pointer inside the MDO (md + md_base_offset). 2883 * Set once we find an update to make and use for next ones. 2884 * @param not_null true if we know obj cannot be null 2885 * @param signature_at_call_k signature at call for obj 2886 * @param callee_signature_k signature of callee for obj; used when the at-call and 2887 * callee signatures differ (i.e. at a method handle call) 2888 * @return the only klass we know will ever be seen at this profile point 2889 */ 2890 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, 2891 Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k, 2892 ciKlass* callee_signature_k) { 2893 ciKlass* result = NULL; 2894 bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k); 2895 bool do_update = !TypeEntries::is_type_unknown(profiled_k); 2896 // known not to be null (or null bit already set) and type already set to 2897 // unknown: nothing we can do to improve profiling 2898 if (!do_null && !do_update) { 2899 return result; 2900 } 2901 2902 ciKlass* exact_klass = NULL; 2903 Compilation* comp = Compilation::current(); 2904 if (do_update) { 2905 // try to find exact type, using CHA if possible, so that loading 2906 // the klass from the object can be avoided 2907 ciType* type = obj->exact_type(); 2908 if (type == NULL) { 2909 type = obj->declared_type(); 2910 type = comp->cha_exact_type(type); 2911 } 2912 assert(type == NULL || type->is_klass(), "type should be class"); 2913 exact_klass = (type != NULL && type->is_loaded()) ?
(ciKlass*)type : NULL; 2914 2915 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass; 2916 } 2917 2918 if (!do_null && !do_update) { 2919 return result; 2920 } 2921 2922 ciKlass* exact_signature_k = NULL; 2923 if (do_update && signature_at_call_k != NULL) { 2924 // Is the type from the signature exact (the only one possible)? 2925 exact_signature_k = signature_at_call_k->exact_klass(); 2926 if (exact_signature_k == NULL) { 2927 exact_signature_k = comp->cha_exact_type(signature_at_call_k); 2928 } else { 2929 result = exact_signature_k; 2930 // Known statically. No need to emit any code: prevent 2931 // LIR_Assembler::emit_profile_type() from emitting useless code 2932 profiled_k = ciTypeEntries::with_status(result, profiled_k); 2933 } 2934 // exact_klass and exact_signature_k can be both non NULL but 2935 // different if exact_klass is loaded after the ciObject for 2936 // exact_signature_k is created. 2937 if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) { 2938 // sometimes the type of the signature is better than the best type 2939 // the compiler has 2940 exact_klass = exact_signature_k; 2941 } 2942 if (callee_signature_k != NULL && 2943 callee_signature_k != signature_at_call_k) { 2944 ciKlass* improved_klass = callee_signature_k->exact_klass(); 2945 if (improved_klass == NULL) { 2946 improved_klass = comp->cha_exact_type(callee_signature_k); 2947 } 2948 if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) { 2949 exact_klass = exact_signature_k; 2950 } 2951 } 2952 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass; 2953 } 2954 2955 if (!do_null && !do_update) { 2956 return result; 2957 } 2958 2959 if (mdp == LIR_OprFact::illegalOpr) { 2960 mdp = new_register(T_METADATA); 2961 __ metadata2reg(md->constant_encoding(), mdp); 2962 if (md_base_offset != 0) { 2963 LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS); 2964 mdp = new_pointer_register(); 2965 __ leal(LIR_OprFact::address(base_type_address), mdp); 2966 } 2967 } 2968 LIRItem value(obj, this); 2969 value.load_item(); 2970 __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA), 2971 value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL); 2972 return result; 2973 } 2974 2975 // profile parameters on entry to the root of the compilation 2976 void LIRGenerator::profile_parameters(Base* x) { 2977 if (compilation()->profile_parameters()) { 2978 CallingConvention* args = compilation()->frame_map()->incoming_arguments(); 2979 ciMethodData* md = scope()->method()->method_data_or_null(); 2980 assert(md != NULL, "Sanity"); 2981 2982 if (md->parameters_type_data() != NULL) { 2983 ciParametersTypeData* parameters_type_data = md->parameters_type_data(); 2984 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters(); 2985 LIR_Opr mdp = LIR_OprFact::illegalOpr; 2986 for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) { 2987 LIR_Opr src = args->at(i); 2988 assert(!src->is_illegal(), "check"); 2989 BasicType t = src->type(); 2990 if (is_reference_type(t)) { 2991 intptr_t profiled_k = parameters->type(j); 2992 Local* local = x->state()->local_at(java_index)->as_Local(); 2993 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)), 2994 in_bytes(ParametersTypeData::type_offset(j)) - 
in_bytes(ParametersTypeData::type_offset(0)), 2995 profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL); 2996 // If the profile is known statically, set it once and for all and do not emit any code 2997 if (exact != NULL) { 2998 md->set_parameter_type(j, exact); 2999 } 3000 j++; 3001 } 3002 java_index += type2size[t]; 3003 } 3004 } 3005 } 3006 } 3007 3008 void LIRGenerator::profile_flags(ciMethodData* md, ciProfileData* data, int flag, LIR_Condition condition) { 3009 assert(md != NULL && data != NULL, "should have been initialized"); 3010 LIR_Opr mdp = new_register(T_METADATA); 3011 __ metadata2reg(md->constant_encoding(), mdp); 3012 LIR_Address* addr = new LIR_Address(mdp, md->byte_offset_of_slot(data, DataLayout::flags_offset()), T_BYTE); 3013 LIR_Opr flags = new_register(T_INT); 3014 __ move(addr, flags); 3015 if (condition != lir_cond_always) { 3016 LIR_Opr update = new_register(T_INT); 3017 __ cmove(condition, LIR_OprFact::intConst(0), LIR_OprFact::intConst(flag), update, T_INT); __ logical_or(flags, update, flags); // merge the conditional update into flags 3018 } else { 3019 __ logical_or(flags, LIR_OprFact::intConst(flag), flags); 3020 } 3021 __ store(flags, addr); 3022 } 3023 3024 void LIRGenerator::profile_null_free_array(LIRItem array, ciMethodData* md, ciArrayLoadStoreData* load_store) { 3025 assert(compilation()->profile_array_accesses(), "array access profiling is disabled"); 3026 3027 LIR_Opr tmp = new_register(T_METADATA); 3028 __ check_null_free_array(array.result(), tmp); 3029 3030 profile_flags(md, load_store, ArrayLoadStoreData::null_free_array_byte_constant(), lir_cond_equal); 3031 } 3032 3033 void LIRGenerator::profile_array_type(AccessIndexed* x, ciMethodData*& md, ciArrayLoadStoreData*& load_store) { 3034 assert(compilation()->profile_array_accesses(), "array access profiling is disabled"); 3035 int bci = x->profiled_bci(); 3036 md = x->profiled_method()->method_data(); 3037 assert(md != NULL, "Sanity"); 3038 ciProfileData* data = md->bci_to_data(bci); 3039 assert(data != NULL && data->is_ArrayLoadStoreData(), "incorrect profiling entry"); 3040 load_store = (ciArrayLoadStoreData*)data; 3041 LIR_Opr mdp = LIR_OprFact::illegalOpr; 3042 profile_type(md, md->byte_offset_of_slot(load_store, ArrayLoadStoreData::array_offset()), 0, 3043 load_store->array()->type(), x->array(), mdp, true, NULL, NULL); 3044 } 3045 3046 void LIRGenerator::profile_element_type(Value element, ciMethodData* md, ciArrayLoadStoreData* load_store) { 3047 assert(compilation()->profile_array_accesses(), "array access profiling is disabled"); 3048 assert(md != NULL && load_store != NULL, "should have been initialized"); 3049 LIR_Opr mdp = LIR_OprFact::illegalOpr; 3050 profile_type(md, md->byte_offset_of_slot(load_store, ArrayLoadStoreData::element_offset()), 0, 3051 load_store->element()->type(), element, mdp, false, NULL, NULL); 3052 } 3053 3054 void LIRGenerator::do_Base(Base* x) { 3055 __ std_entry(LIR_OprFact::illegalOpr); 3056 // Emit moves from physical registers / stack slots to virtual registers 3057 CallingConvention* args = compilation()->frame_map()->incoming_arguments(); 3058 IRScope* irScope = compilation()->hir()->top_scope(); 3059 int java_index = 0; 3060 for (int i = 0; i < args->length(); i++) { 3061 LIR_Opr src = args->at(i); 3062 assert(!src->is_illegal(), "check"); 3063 BasicType t = src->type(); 3064 3065 // Types which are smaller than int are passed as int, so 3066 // correct the type that is passed.
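// (For example, an incoming T_BOOLEAN argument arrives as a T_INT value, so its
// Local is given a T_INT virtual register below.)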
3067 switch (t) { 3068 case T_BYTE: 3069 case T_BOOLEAN: 3070 case T_SHORT: 3071 case T_CHAR: 3072 t = T_INT; 3073 break; 3074 default: 3075 break; 3076 } 3077 3078 LIR_Opr dest = new_register(t); 3079 __ move(src, dest); 3080 3081 // Assign new location to Local instruction for this local 3082 Local* local = x->state()->local_at(java_index)->as_Local(); 3083 assert(local != NULL, "Locals for incoming arguments must have been created"); 3084 #ifndef __SOFTFP__ 3085 // The java calling convention passes double as long and float as int. 3086 assert(as_ValueType(t)->tag() == local->type()->tag(), "check"); 3087 #endif // __SOFTFP__ 3088 local->set_operand(dest); 3089 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL); 3090 java_index += type2size[t]; 3091 } 3092 3093 if (compilation()->env()->dtrace_method_probes()) { 3094 BasicTypeList signature; 3095 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread 3096 signature.append(T_METADATA); // Method* 3097 LIR_OprList* args = new LIR_OprList(); 3098 args->append(getThreadPointer()); 3099 LIR_Opr meth = new_register(T_METADATA); 3100 __ metadata2reg(method()->constant_encoding(), meth); 3101 args->append(meth); 3102 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL); 3103 } 3104 3105 if (method()->is_synchronized()) { 3106 LIR_Opr obj; 3107 if (method()->is_static()) { 3108 obj = new_register(T_OBJECT); 3109 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj); 3110 } else { 3111 Local* receiver = x->state()->local_at(0)->as_Local(); 3112 assert(receiver != NULL, "must already exist"); 3113 obj = receiver->operand(); 3114 } 3115 assert(obj->is_valid(), "must be valid"); 3116 3117 if (method()->is_synchronized() && GenerateSynchronizationCode) { 3118 LIR_Opr lock = syncLockOpr(); 3119 __ load_stack_address_monitor(0, lock); 3120 3121 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException)); 3122 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info); 3123 3124 // receiver is guaranteed non-NULL so don't need CodeEmitInfo 3125 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); 3126 } 3127 } 3128 // increment invocation counters if needed 3129 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. 3130 profile_parameters(x); 3131 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false); 3132 increment_invocation_counter(info); 3133 } 3134 if (method()->has_scalarized_args()) { 3135 // Check if deoptimization was triggered (i.e. orig_pc was set) while buffering scalarized inline type arguments 3136 // in the entry point (see comments in frame::deoptimize). If so, deoptimize only now that we have the right state. 
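    // A reading of the three LIR ops emitted below (a sketch, not a spec):
    // lir_check_orig_pc tests the saved orig_pc slot in the frame, and the
    // notEqual branch enters the DeoptimizeStub, so the method deoptimizes
    // with the fully set up state rather than the incomplete entry state.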
3137     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
3138     CodeStub* deopt_stub = new DeoptimizeStub(info, Deoptimization::Reason_none, Deoptimization::Action_none);
3139     __ append(new LIR_Op0(lir_check_orig_pc));
3140     __ branch(lir_cond_notEqual, deopt_stub);
3141   }
3142
3143   // all blocks with a successor must end with an unconditional jump
3144   // to the successor even if they are consecutive
3145   __ jump(x->default_sux());
3146 }
3147
3148
3149 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3150   // construct our frame and model the production of the incoming pointer
3151   // to the OSR buffer.
3152   __ osr_entry(LIR_Assembler::osrBufferPointer());
3153   LIR_Opr result = rlock_result(x);
3154   __ move(LIR_Assembler::osrBufferPointer(), result);
3155 }
3156
3157 void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
3158   if (loc->is_register()) {
3159     param->load_item_force(loc);
3160   } else {
3161     LIR_Address* addr = loc->as_address_ptr();
3162     param->load_for_store(addr->type());
3163     assert(addr->type() != T_PRIMITIVE_OBJECT, "not supported yet");
3164     if (addr->type() == T_OBJECT) {
3165       __ move_wide(param->result(), addr);
3166     } else {
3167       __ move(param->result(), addr);
3168     }
3169   }
3170 }
3171
3172 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3173   assert(args->length() == arg_list->length(),
3174          "args=%d, arg_list=%d", args->length(), arg_list->length());
3175   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3176     LIRItem* param = args->at(i);
3177     LIR_Opr loc = arg_list->at(i);
3178     invoke_load_one_argument(param, loc);
3179   }
3180
3181   if (x->has_receiver()) {
3182     LIRItem* receiver = args->at(0);
3183     LIR_Opr loc = arg_list->at(0);
3184     if (loc->is_register()) {
3185       receiver->load_item_force(loc);
3186     } else {
3187       assert(loc->is_address(), "just checking");
3188       receiver->load_for_store(T_OBJECT);
3189       __ move_wide(receiver->result(), loc->as_address_ptr());
3190     }
3191   }
3192 }
3193
3194
3195 // Visits all arguments, returns appropriate items without loading them
3196 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3197   LIRItemList* argument_items = new LIRItemList();
3198   if (x->has_receiver()) {
3199     LIRItem* receiver = new LIRItem(x->receiver(), this);
3200     argument_items->append(receiver);
3201   }
3202   for (int i = 0; i < x->number_of_arguments(); i++) {
3203     LIRItem* param = new LIRItem(x->argument_at(i), this);
3204     argument_items->append(param);
3205   }
3206   return argument_items;
3207 }
3208
3209
3210 // The invoke with receiver has the following phases:
3211 //   a) traverse and load/lock receiver;
3212 //   b) traverse all arguments -> item-array (invoke_visit_arguments)
3213 //   c) push receiver on stack
3214 //   d) load each of the items and push on stack
3215 //   e) unlock receiver
3216 //   f) move receiver into receiver-register %o0
3217 //   g) lock result registers and emit call operation
3218 //
3219 // Before issuing a call, we must spill-save all values on stack
3220 // that are in caller-save registers. "spill-save" moves those registers
3221 // either into a free callee-save register or spills them if no free
3222 // callee-save register is available.
3223 //
3224 // The problem is where to invoke spill-save.
3225 // - if invoked between e) and f), we may lock a callee-save
3226 //   register in "spill-save" that destroys the receiver register
3227 //   before f) is executed
3228 // - if we rearrange f) to be earlier (by loading %o0) it
3229 //   may destroy a value on the stack that is currently in %o0
3230 //   and is waiting to be spilled
3231 // - if we keep the receiver locked while doing spill-save,
3232 //   we cannot spill it as it is spill-locked
3233 //
3234 void LIRGenerator::do_Invoke(Invoke* x) {
3235   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
3236
3237   LIR_OprList* arg_list = cc->args();
3238   LIRItemList* args = invoke_visit_arguments(x);
3239   LIR_Opr receiver = LIR_OprFact::illegalOpr;
3240
3241   // setup result register
3242   LIR_Opr result_register = LIR_OprFact::illegalOpr;
3243   if (x->type() != voidType) {
3244     result_register = result_register_for(x->type());
3245   }
3246
3247   CodeEmitInfo* info = state_for(x, x->state());
3248
3249   invoke_load_arguments(x, args, arg_list);
3250
3251   if (x->has_receiver()) {
3252     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
3253     receiver = args->at(0)->result();
3254   }
3255
3256   // emit invoke code
3257   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
3258
3259   // JSR 292
3260   // Preserve the SP over MethodHandle call sites, if needed.
3261   ciMethod* target = x->target();
3262   bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
3263                                   target->is_method_handle_intrinsic() ||
3264                                   target->is_compiled_lambda_form());
3265   if (is_method_handle_invoke) {
3266     info->set_is_method_handle_invoke(true);
3267     if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
3268       __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
3269     }
3270   }
3271
3272   switch (x->code()) {
3273     case Bytecodes::_invokestatic:
3274       __ call_static(target, result_register,
3275                      SharedRuntime::get_resolve_static_call_stub(),
3276                      arg_list, info);
3277       break;
3278     case Bytecodes::_invokespecial:
3279     case Bytecodes::_invokevirtual:
3280     case Bytecodes::_invokeinterface:
3281       // for a loaded and final (method or class) target we still produce an inline cache,
3282       // in order to be able to call mixed mode
3283       if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {
3284         __ call_opt_virtual(target, receiver, result_register,
3285                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
3286                             arg_list, info);
3287       } else {
3288         __ call_icvirtual(target, receiver, result_register,
3289                           SharedRuntime::get_resolve_virtual_call_stub(),
3290                           arg_list, info);
3291       }
3292       break;
3293     case Bytecodes::_invokedynamic: {
3294       __ call_dynamic(target, receiver, result_register,
3295                       SharedRuntime::get_resolve_static_call_stub(),
3296                       arg_list, info);
3297       break;
3298     }
3299     default:
3300       fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
3301       break;
3302   }
3303
3304   // JSR 292
3305   // Restore the SP after MethodHandle call sites, if needed.
3306 if (is_method_handle_invoke 3307 && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) { 3308 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer()); 3309 } 3310 3311 if (result_register->is_valid()) { 3312 LIR_Opr result = rlock_result(x); 3313 __ move(result_register, result); 3314 } 3315 } 3316 3317 3318 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) { 3319 assert(x->number_of_arguments() == 1, "wrong type"); 3320 LIRItem value (x->argument_at(0), this); 3321 LIR_Opr reg = rlock_result(x); 3322 value.load_item(); 3323 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type())); 3324 __ move(tmp, reg); 3325 } 3326 3327 3328 3329 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval() 3330 void LIRGenerator::do_IfOp(IfOp* x) { 3331 #ifdef ASSERT 3332 { 3333 ValueTag xtag = x->x()->type()->tag(); 3334 ValueTag ttag = x->tval()->type()->tag(); 3335 assert(xtag == intTag || xtag == objectTag, "cannot handle others"); 3336 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others"); 3337 assert(ttag == x->fval()->type()->tag(), "cannot handle others"); 3338 } 3339 #endif 3340 3341 LIRItem left(x->x(), this); 3342 LIRItem right(x->y(), this); 3343 left.load_item(); 3344 if (can_inline_as_constant(right.value()) && !x->substitutability_check()) { 3345 right.dont_load_item(); 3346 } else { 3347 // substitutability_check() needs to use right as a base register. 3348 right.load_item(); 3349 } 3350 3351 LIRItem t_val(x->tval(), this); 3352 LIRItem f_val(x->fval(), this); 3353 t_val.dont_load_item(); 3354 f_val.dont_load_item(); 3355 3356 if (x->substitutability_check()) { 3357 substitutability_check(x, left, right, t_val, f_val); 3358 } else { 3359 LIR_Opr reg = rlock_result(x); 3360 __ cmp(lir_cond(x->cond()), left.result(), right.result()); 3361 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type())); 3362 } 3363 } 3364 3365 void LIRGenerator::substitutability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) { 3366 assert(x->cond() == If::eql || x->cond() == If::neq, "must be"); 3367 bool is_acmpeq = (x->cond() == If::eql); 3368 LIR_Opr equal_result = is_acmpeq ? t_val.result() : f_val.result(); 3369 LIR_Opr not_equal_result = is_acmpeq ? 
f_val.result() : t_val.result(); 3370 LIR_Opr result = rlock_result(x); 3371 CodeEmitInfo* info = state_for(x, x->state_before()); 3372 3373 substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info); 3374 } 3375 3376 void LIRGenerator::substitutability_check(If* x, LIRItem& left, LIRItem& right) { 3377 LIR_Opr equal_result = LIR_OprFact::intConst(1); 3378 LIR_Opr not_equal_result = LIR_OprFact::intConst(0); 3379 LIR_Opr result = new_register(T_INT); 3380 CodeEmitInfo* info = state_for(x, x->state_before()); 3381 3382 substitutability_check_common(x->x(), x->y(), left, right, equal_result, not_equal_result, result, info); 3383 3384 assert(x->cond() == If::eql || x->cond() == If::neq, "must be"); 3385 __ cmp(lir_cond(x->cond()), result, equal_result); 3386 } 3387 3388 void LIRGenerator::substitutability_check_common(Value left_val, Value right_val, LIRItem& left, LIRItem& right, 3389 LIR_Opr equal_result, LIR_Opr not_equal_result, LIR_Opr result, 3390 CodeEmitInfo* info) { 3391 LIR_Opr tmp1 = LIR_OprFact::illegalOpr; 3392 LIR_Opr tmp2 = LIR_OprFact::illegalOpr; 3393 LIR_Opr left_klass_op = LIR_OprFact::illegalOpr; 3394 LIR_Opr right_klass_op = LIR_OprFact::illegalOpr; 3395 3396 ciKlass* left_klass = left_val ->as_loaded_klass_or_null(); 3397 ciKlass* right_klass = right_val->as_loaded_klass_or_null(); 3398 3399 if ((left_klass == NULL || right_klass == NULL) ||// The klass is still unloaded, or came from a Phi node. 3400 !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) { 3401 init_temps_for_substitutability_check(tmp1, tmp2); 3402 } 3403 3404 if (left_klass != NULL && left_klass->is_inlinetype() && left_klass == right_klass) { 3405 // No need to load klass -- the operands are statically known to be the same inline klass. 3406 } else { 3407 BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA; 3408 left_klass_op = new_register(t_klass); 3409 right_klass_op = new_register(t_klass); 3410 } 3411 3412 CodeStub* slow_path = new SubstitutabilityCheckStub(left.result(), right.result(), info); 3413 __ substitutability_check(result, left.result(), right.result(), equal_result, not_equal_result, 3414 tmp1, tmp2, 3415 left_klass, right_klass, left_klass_op, right_klass_op, info, slow_path); 3416 } 3417 3418 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) { 3419 assert(x->number_of_arguments() == 0, "wrong type"); 3420 // Enforce computation of _reserved_argument_area_size which is required on some platforms. 
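  // A sketch of the assumption behind the call below: building the (empty)
  // CallingConvention is done purely for its side effect; the returned
  // object is otherwise unused, but frame_map()->c_calling_convention()
  // computes _reserved_argument_area_size, which the leaf call emitted
  // next relies on having been set up.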
3421 BasicTypeList signature; 3422 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 3423 LIR_Opr reg = result_register_for(x->type()); 3424 __ call_runtime_leaf(routine, getThreadTemp(), 3425 reg, new LIR_OprList()); 3426 LIR_Opr result = rlock_result(x); 3427 __ move(reg, result); 3428 } 3429 3430 3431 3432 void LIRGenerator::do_Intrinsic(Intrinsic* x) { 3433 switch (x->id()) { 3434 case vmIntrinsics::_intBitsToFloat : 3435 case vmIntrinsics::_doubleToRawLongBits : 3436 case vmIntrinsics::_longBitsToDouble : 3437 case vmIntrinsics::_floatToRawIntBits : { 3438 do_FPIntrinsics(x); 3439 break; 3440 } 3441 3442 #ifdef JFR_HAVE_INTRINSICS 3443 case vmIntrinsics::_counterTime: 3444 do_RuntimeCall(CAST_FROM_FN_PTR(address, JfrTime::time_function()), x); 3445 break; 3446 #endif 3447 3448 case vmIntrinsics::_currentTimeMillis: 3449 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), x); 3450 break; 3451 3452 case vmIntrinsics::_nanoTime: 3453 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), x); 3454 break; 3455 3456 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break; 3457 case vmIntrinsics::_isInstance: do_isInstance(x); break; 3458 case vmIntrinsics::_isPrimitive: do_isPrimitive(x); break; 3459 case vmIntrinsics::_getModifiers: do_getModifiers(x); break; 3460 case vmIntrinsics::_getClass: do_getClass(x); break; 3461 case vmIntrinsics::_getObjectSize: do_getObjectSize(x); break; 3462 case vmIntrinsics::_currentCarrierThread: do_currentCarrierThread(x); break; 3463 case vmIntrinsics::_currentThread: do_vthread(x); break; 3464 case vmIntrinsics::_scopedValueCache: do_scopedValueCache(x); break; 3465 3466 case vmIntrinsics::_dlog: // fall through 3467 case vmIntrinsics::_dlog10: // fall through 3468 case vmIntrinsics::_dabs: // fall through 3469 case vmIntrinsics::_dsqrt: // fall through 3470 case vmIntrinsics::_dsqrt_strict: // fall through 3471 case vmIntrinsics::_dtan: // fall through 3472 case vmIntrinsics::_dsin : // fall through 3473 case vmIntrinsics::_dcos : // fall through 3474 case vmIntrinsics::_dexp : // fall through 3475 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break; 3476 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break; 3477 3478 case vmIntrinsics::_fmaD: do_FmaIntrinsic(x); break; 3479 case vmIntrinsics::_fmaF: do_FmaIntrinsic(x); break; 3480 3481 case vmIntrinsics::_Preconditions_checkIndex: 3482 do_PreconditionsCheckIndex(x, T_INT); 3483 break; 3484 case vmIntrinsics::_Preconditions_checkLongIndex: 3485 do_PreconditionsCheckIndex(x, T_LONG); 3486 break; 3487 3488 case vmIntrinsics::_compareAndSetReference: 3489 do_CompareAndSwap(x, objectType); 3490 break; 3491 case vmIntrinsics::_compareAndSetInt: 3492 do_CompareAndSwap(x, intType); 3493 break; 3494 case vmIntrinsics::_compareAndSetLong: 3495 do_CompareAndSwap(x, longType); 3496 break; 3497 3498 case vmIntrinsics::_loadFence : 3499 __ membar_acquire(); 3500 break; 3501 case vmIntrinsics::_storeFence: 3502 __ membar_release(); 3503 break; 3504 case vmIntrinsics::_storeStoreFence: 3505 __ membar_storestore(); 3506 break; 3507 case vmIntrinsics::_fullFence : 3508 __ membar(); 3509 break; 3510 case vmIntrinsics::_onSpinWait: 3511 __ on_spin_wait(); 3512 break; 3513 case vmIntrinsics::_Reference_get: 3514 do_Reference_get(x); 3515 break; 3516 3517 case vmIntrinsics::_updateCRC32: 3518 case vmIntrinsics::_updateBytesCRC32: 3519 case vmIntrinsics::_updateByteBufferCRC32: 3520 do_update_CRC32(x); 3521 break; 3522 3523 case vmIntrinsics::_updateBytesCRC32C: 3524 case 
vmIntrinsics::_updateDirectByteBufferCRC32C: 3525 do_update_CRC32C(x); 3526 break; 3527 3528 case vmIntrinsics::_vectorizedMismatch: 3529 do_vectorizedMismatch(x); 3530 break; 3531 3532 case vmIntrinsics::_blackhole: 3533 do_blackhole(x); 3534 break; 3535 3536 default: ShouldNotReachHere(); break; 3537 } 3538 } 3539 3540 void LIRGenerator::profile_arguments(ProfileCall* x) { 3541 if (compilation()->profile_arguments()) { 3542 int bci = x->bci_of_invoke(); 3543 ciMethodData* md = x->method()->method_data_or_null(); 3544 assert(md != NULL, "Sanity"); 3545 ciProfileData* data = md->bci_to_data(bci); 3546 if (data != NULL) { 3547 if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) || 3548 (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) { 3549 ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset(); 3550 int base_offset = md->byte_offset_of_slot(data, extra); 3551 LIR_Opr mdp = LIR_OprFact::illegalOpr; 3552 ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args(); 3553 3554 Bytecodes::Code bc = x->method()->java_code_at_bci(bci); 3555 int start = 0; 3556 int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments(); 3557 if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) { 3558 // first argument is not profiled at call (method handle invoke) 3559 assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected"); 3560 start = 1; 3561 } 3562 ciSignature* callee_signature = x->callee()->signature(); 3563 // method handle call to virtual method 3564 bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc); 3565 ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? 
x->callee()->holder() : NULL); 3566 3567 bool ignored_will_link; 3568 ciSignature* signature_at_call = NULL; 3569 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); 3570 ciSignatureStream signature_at_call_stream(signature_at_call); 3571 3572 // if called through method handle invoke, some arguments may have been popped 3573 for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) { 3574 int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset()); 3575 ciKlass* exact = profile_type(md, base_offset, off, 3576 args->type(i), x->profiled_arg_at(i+start), mdp, 3577 !x->arg_needs_null_check(i+start), 3578 signature_at_call_stream.next_klass(), callee_signature_stream.next_klass()); 3579 if (exact != NULL) { 3580 md->set_argument_type(bci, i, exact); 3581 } 3582 } 3583 } else { 3584 #ifdef ASSERT 3585 Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke()); 3586 int n = x->nb_profiled_args(); 3587 assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() || 3588 (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))), 3589 "only at JSR292 bytecodes"); 3590 #endif 3591 } 3592 } 3593 } 3594 } 3595 3596 // profile parameters on entry to an inlined method 3597 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) { 3598 if (compilation()->profile_parameters() && x->inlined()) { 3599 ciMethodData* md = x->callee()->method_data_or_null(); 3600 if (md != NULL) { 3601 ciParametersTypeData* parameters_type_data = md->parameters_type_data(); 3602 if (parameters_type_data != NULL) { 3603 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters(); 3604 LIR_Opr mdp = LIR_OprFact::illegalOpr; 3605 bool has_receiver = !x->callee()->is_static(); 3606 ciSignature* sig = x->callee()->signature(); 3607 ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL); 3608 int i = 0; // to iterate on the Instructions 3609 Value arg = x->recv(); 3610 bool not_null = false; 3611 int bci = x->bci_of_invoke(); 3612 Bytecodes::Code bc = x->method()->java_code_at_bci(bci); 3613 // The first parameter is the receiver so that's what we start 3614 // with if it exists. 
One exception is method handle call to 3615 // virtual method: the receiver is in the args list 3616 if (arg == NULL || !Bytecodes::has_receiver(bc)) { 3617 i = 1; 3618 arg = x->profiled_arg_at(0); 3619 not_null = !x->arg_needs_null_check(0); 3620 } 3621 int k = 0; // to iterate on the profile data 3622 for (;;) { 3623 intptr_t profiled_k = parameters->type(k); 3624 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)), 3625 in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)), 3626 profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL); 3627 // If the profile is known statically set it once for all and do not emit any code 3628 if (exact != NULL) { 3629 md->set_parameter_type(k, exact); 3630 } 3631 k++; 3632 if (k >= parameters_type_data->number_of_parameters()) { 3633 #ifdef ASSERT 3634 int extra = 0; 3635 if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 && 3636 x->nb_profiled_args() >= TypeProfileParmsLimit && 3637 x->recv() != NULL && Bytecodes::has_receiver(bc)) { 3638 extra += 1; 3639 } 3640 assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?"); 3641 #endif 3642 break; 3643 } 3644 arg = x->profiled_arg_at(i); 3645 not_null = !x->arg_needs_null_check(i); 3646 i++; 3647 } 3648 } 3649 } 3650 } 3651 } 3652 3653 void LIRGenerator::do_ProfileCall(ProfileCall* x) { 3654 // Need recv in a temporary register so it interferes with the other temporaries 3655 LIR_Opr recv = LIR_OprFact::illegalOpr; 3656 LIR_Opr mdo = new_register(T_METADATA); 3657 // tmp is used to hold the counters on SPARC 3658 LIR_Opr tmp = new_pointer_register(); 3659 3660 if (x->nb_profiled_args() > 0) { 3661 profile_arguments(x); 3662 } 3663 3664 // profile parameters on inlined method entry including receiver 3665 if (x->recv() != NULL || x->nb_profiled_args() > 0) { 3666 profile_parameters_at_call(x); 3667 } 3668 3669 if (x->recv() != NULL) { 3670 LIRItem value(x->recv(), this); 3671 value.load_item(); 3672 recv = new_register(T_OBJECT); 3673 __ move(value.result(), recv); 3674 } 3675 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder()); 3676 } 3677 3678 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) { 3679 int bci = x->bci_of_invoke(); 3680 ciMethodData* md = x->method()->method_data_or_null(); 3681 assert(md != NULL, "Sanity"); 3682 ciProfileData* data = md->bci_to_data(bci); 3683 if (data != NULL) { 3684 assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type"); 3685 ciSingleTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret(); 3686 LIR_Opr mdp = LIR_OprFact::illegalOpr; 3687 3688 bool ignored_will_link; 3689 ciSignature* signature_at_call = NULL; 3690 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); 3691 3692 // The offset within the MDO of the entry to update may be too large 3693 // to be used in load/store instructions on some platforms. So have 3694 // profile_type() compute the address of the profile in a register. 
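  // Sketch of what that means here: profile_type() (defined earlier in this
  // file) loads the MDO with metadata2reg() and, when handed a non-zero base
  // offset, uses leal() to form the entry's base address in a pointer
  // register so that the remaining displacement stays small.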
3695     ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3696                                   ret->type(), x->ret(), mdp,
3697                                   !x->needs_null_check(),
3698                                   signature_at_call->return_type()->as_klass(),
3699                                   x->callee()->signature()->return_type()->as_klass());
3700     if (exact != NULL) {
3701       md->set_return_type(bci, exact);
3702     }
3703   }
3704 }
3705
3706 bool LIRGenerator::profile_inline_klass(ciMethodData* md, ciProfileData* data, Value value, int flag) {
3707   ciKlass* klass = value->as_loaded_klass_or_null();
3708   if (klass != NULL) {
3709     if (klass->is_inlinetype()) {
3710       profile_flags(md, data, flag, lir_cond_always);
3711     } else if (klass->can_be_inline_klass()) {
3712       return false;
3713     }
3714   } else {
3715     return false;
3716   }
3717   return true;
3718 }
3719
3720
3721 void LIRGenerator::do_ProfileACmpTypes(ProfileACmpTypes* x) {
3722   ciMethod* method = x->method();
3723   assert(method != NULL, "method should be set if branch is profiled");
3724   ciMethodData* md = method->method_data_or_null();
3725   assert(md != NULL, "Sanity");
3726   ciProfileData* data = md->bci_to_data(x->bci());
3727   assert(data != NULL, "must have profiling data");
3728   assert(data->is_ACmpData(), "need ACmpData to profile an acmp");
3729   ciACmpData* acmp = (ciACmpData*)data;
3730   LIR_Opr mdp = LIR_OprFact::illegalOpr;
3731   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()), 0,
3732                acmp->left()->type(), x->left(), mdp, !x->left_maybe_null(), NULL, NULL);
3733   int flags_offset = md->byte_offset_of_slot(data, DataLayout::flags_offset());
3734   if (!profile_inline_klass(md, acmp, x->left(), ACmpData::left_inline_type_byte_constant())) {
3735     LIR_Opr mdp = new_register(T_METADATA);
3736     __ metadata2reg(md->constant_encoding(), mdp);
3737     LIRItem value(x->left(), this);
3738     value.load_item();
3739     __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::left_inline_type_byte_constant(), new_register(T_INT), !x->left_maybe_null());
3740   }
3741   profile_type(md, md->byte_offset_of_slot(acmp, ACmpData::left_offset()),
3742                in_bytes(ACmpData::right_offset()) - in_bytes(ACmpData::left_offset()),
3743                acmp->right()->type(), x->right(), mdp, !x->right_maybe_null(), NULL, NULL);
3744   if (!profile_inline_klass(md, acmp, x->right(), ACmpData::right_inline_type_byte_constant())) {
3745     LIR_Opr mdp = new_register(T_METADATA);
3746     __ metadata2reg(md->constant_encoding(), mdp);
3747     LIRItem value(x->right(), this);
3748     value.load_item();
3749     __ profile_inline_type(new LIR_Address(mdp, flags_offset, T_INT), value.result(), ACmpData::right_inline_type_byte_constant(), new_register(T_INT), !x->right_maybe_null());
3750   }
3751 }
3752
3753 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3754   // We can safely ignore accessors here, since c2 will inline them anyway;
3755   // accessors are also always mature.
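  // Worked example of the notification frequency used below (the default
  // value is only assumed here): with freq_log = 20 and no threshold
  // scaling, right_n_bits(20) == 2^20 - 1 is passed as the frequency, so
  // increment_event_counter_impl() masks the counter with it and calls
  // into the runtime roughly once every 2^20 inlinee invocations.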
3756   if (!x->inlinee()->is_accessor()) {
3757     CodeEmitInfo* info = state_for(x, x->state(), true);
3758     // Notify the runtime very infrequently only to take care of counter overflows
3759     int freq_log = Tier23InlineeNotifyFreqLog;
3760     double scale;
3761     if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
3762       freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3763     }
3764     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3765   }
3766 }
3767
3768 void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3769   if (compilation()->is_profiling()) {
3770 #if defined(X86) && !defined(_LP64)
3771     // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3772     LIR_Opr left_copy = new_register(left->type());
3773     __ move(left, left_copy);
3774     __ cmp(cond, left_copy, right);
3775 #else
3776     __ cmp(cond, left, right);
3777 #endif
3778     LIR_Opr step = new_register(T_INT);
3779     LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment);
3780     LIR_Opr zero = LIR_OprFact::intConst(0);
3781     __ cmove(cond,
3782              (left_bci < bci) ? plus_one : zero,
3783              (right_bci < bci) ? plus_one : zero,
3784              step, left->type());
3785     increment_backedge_counter(info, step, bci);
3786   }
3787 }
3788
3789
3790 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge) {
3791   int freq_log = 0;
3792   int level = compilation()->env()->comp_level();
3793   if (level == CompLevel_limited_profile) {
3794     freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3795   } else if (level == CompLevel_full_profile) {
3796     freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3797   } else {
3798     ShouldNotReachHere();
3799   }
3800   // Increment the appropriate invocation/backedge counter and notify the runtime.
3801   double scale;
3802   if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
3803     freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3804   }
3805   increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
3806 }
3807
3808 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3809                                                 ciMethod *method, LIR_Opr step, int frequency,
3810                                                 int bci, bool backedge, bool notify) {
3811   assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
3812   int level = _compilation->env()->comp_level();
3813   assert(level > CompLevel_simple, "Shouldn't be here");
3814
3815   int offset = -1;
3816   LIR_Opr counter_holder;
3817   if (level == CompLevel_limited_profile) {
3818     MethodCounters* counters_adr = method->ensure_method_counters();
3819     if (counters_adr == NULL) {
3820       bailout("method counters allocation failed");
3821       return;
3822     }
3823     counter_holder = new_pointer_register();
3824     __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3825     offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3826                                  MethodCounters::invocation_counter_offset());
3827   } else if (level == CompLevel_full_profile) {
3828     counter_holder = new_register(T_METADATA);
3829     offset = in_bytes(backedge ?
MethodData::backedge_counter_offset() :
3830                                  MethodData::invocation_counter_offset());
3831     ciMethodData* md = method->method_data_or_null();
3832     assert(md != NULL, "Sanity");
3833     __ metadata2reg(md->constant_encoding(), counter_holder);
3834   } else {
3835     ShouldNotReachHere();
3836   }
3837   LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3838   LIR_Opr result = new_register(T_INT);
3839   __ load(counter, result);
3840   __ add(result, step, result);
3841   __ store(result, counter);
3842   if (notify && (!backedge || UseOnStackReplacement)) {
3843     LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
3844     // The bci for info can point to the cmp of an if; we want the bci of the if itself
3845     CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3846     int freq = frequency << InvocationCounter::count_shift;
3847     if (freq == 0) {
3848       if (!step->is_constant()) {
3849         __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3850         __ branch(lir_cond_notEqual, overflow);
3851       } else {
3852         __ branch(lir_cond_always, overflow);
3853       }
3854     } else {
3855       LIR_Opr mask = load_immediate(freq, T_INT);
3856       if (!step->is_constant()) {
3857         // If step is 0, make sure the overflow check below always fails
3858         __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3859         __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT);
3860       }
3861       __ logical_and(result, mask, result);
3862       __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3863       __ branch(lir_cond_equal, overflow);
3864     }
3865     __ branch_destination(overflow->continuation());
3866   }
3867 }
3868
3869 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3870   LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3871   BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3872
3873   if (x->pass_thread()) {
3874     signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
3875     args->append(getThreadPointer());
3876   }
3877
3878   for (int i = 0; i < x->number_of_arguments(); i++) {
3879     Value a = x->argument_at(i);
3880     LIRItem* item = new LIRItem(a, this);
3881     item->load_item();
3882     args->append(item->result());
3883     signature->append(as_BasicType(a->type()));
3884   }
3885
3886   LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3887   if (x->type() == voidType) {
3888     set_no_result(x);
3889   } else {
3890     __ move(result, rlock_result(x));
3891   }
3892 }
3893
3894 #ifdef ASSERT
3895 void LIRGenerator::do_Assert(Assert *x) {
3896   ValueTag tag = x->x()->type()->tag();
3897   If::Condition cond = x->cond();
3898
3899   LIRItem xitem(x->x(), this);
3900   LIRItem yitem(x->y(), this);
3901   LIRItem* xin = &xitem;
3902   LIRItem* yin = &yitem;
3903
3904   assert(tag == intTag, "Only integer assertions are valid!");
3905
3906   xin->load_item();
3907   yin->dont_load_item();
3908
3909   set_no_result(x);
3910
3911   LIR_Opr left = xin->result();
3912   LIR_Opr right = yin->result();
3913
3914   __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3915 }
3916 #endif
3917
3918 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3919
3920
3921   Instruction *a = x->x();
3922   Instruction *b = x->y();
3923   if (!a || StressRangeCheckElimination) {
3924     assert(!b || StressRangeCheckElimination, "B must also be null");
3925
3926     CodeEmitInfo *info = state_for(x, x->state());
3927     CodeStub* stub = new PredicateFailedStub(info);
3928
3929     __ jump(stub);
3930   } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3931 int a_int = a->type()->as_IntConstant()->value(); 3932 int b_int = b->type()->as_IntConstant()->value(); 3933 3934 bool ok = false; 3935 3936 switch(x->cond()) { 3937 case Instruction::eql: ok = (a_int == b_int); break; 3938 case Instruction::neq: ok = (a_int != b_int); break; 3939 case Instruction::lss: ok = (a_int < b_int); break; 3940 case Instruction::leq: ok = (a_int <= b_int); break; 3941 case Instruction::gtr: ok = (a_int > b_int); break; 3942 case Instruction::geq: ok = (a_int >= b_int); break; 3943 case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break; 3944 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break; 3945 default: ShouldNotReachHere(); 3946 } 3947 3948 if (ok) { 3949 3950 CodeEmitInfo *info = state_for(x, x->state()); 3951 CodeStub* stub = new PredicateFailedStub(info); 3952 3953 __ jump(stub); 3954 } 3955 } else { 3956 3957 ValueTag tag = x->x()->type()->tag(); 3958 If::Condition cond = x->cond(); 3959 LIRItem xitem(x->x(), this); 3960 LIRItem yitem(x->y(), this); 3961 LIRItem* xin = &xitem; 3962 LIRItem* yin = &yitem; 3963 3964 assert(tag == intTag, "Only integer deoptimizations are valid!"); 3965 3966 xin->load_item(); 3967 yin->dont_load_item(); 3968 set_no_result(x); 3969 3970 LIR_Opr left = xin->result(); 3971 LIR_Opr right = yin->result(); 3972 3973 CodeEmitInfo *info = state_for(x, x->state()); 3974 CodeStub* stub = new PredicateFailedStub(info); 3975 3976 __ cmp(lir_cond(cond), left, right); 3977 __ branch(lir_cond(cond), stub); 3978 } 3979 } 3980 3981 void LIRGenerator::do_blackhole(Intrinsic *x) { 3982 assert(!x->has_receiver(), "Should have been checked before: only static methods here"); 3983 for (int c = 0; c < x->number_of_arguments(); c++) { 3984 // Load the argument 3985 LIRItem vitem(x->argument_at(c), this); 3986 vitem.load_item(); 3987 // ...and leave it unused. 
3988 } 3989 } 3990 3991 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) { 3992 LIRItemList args(1); 3993 LIRItem value(arg1, this); 3994 args.append(&value); 3995 BasicTypeList signature; 3996 signature.append(as_BasicType(arg1->type())); 3997 3998 return call_runtime(&signature, &args, entry, result_type, info); 3999 } 4000 4001 4002 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) { 4003 LIRItemList args(2); 4004 LIRItem value1(arg1, this); 4005 LIRItem value2(arg2, this); 4006 args.append(&value1); 4007 args.append(&value2); 4008 BasicTypeList signature; 4009 signature.append(as_BasicType(arg1->type())); 4010 signature.append(as_BasicType(arg2->type())); 4011 4012 return call_runtime(&signature, &args, entry, result_type, info); 4013 } 4014 4015 4016 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args, 4017 address entry, ValueType* result_type, CodeEmitInfo* info) { 4018 // get a result register 4019 LIR_Opr phys_reg = LIR_OprFact::illegalOpr; 4020 LIR_Opr result = LIR_OprFact::illegalOpr; 4021 if (result_type->tag() != voidTag) { 4022 result = new_register(result_type); 4023 phys_reg = result_register_for(result_type); 4024 } 4025 4026 // move the arguments into the correct location 4027 CallingConvention* cc = frame_map()->c_calling_convention(signature); 4028 assert(cc->length() == args->length(), "argument mismatch"); 4029 for (int i = 0; i < args->length(); i++) { 4030 LIR_Opr arg = args->at(i); 4031 LIR_Opr loc = cc->at(i); 4032 if (loc->is_register()) { 4033 __ move(arg, loc); 4034 } else { 4035 LIR_Address* addr = loc->as_address_ptr(); 4036 // if (!can_store_as_constant(arg)) { 4037 // LIR_Opr tmp = new_register(arg->type()); 4038 // __ move(arg, tmp); 4039 // arg = tmp; 4040 // } 4041 __ move(arg, addr); 4042 } 4043 } 4044 4045 if (info) { 4046 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info); 4047 } else { 4048 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args()); 4049 } 4050 if (result->is_valid()) { 4051 __ move(phys_reg, result); 4052 } 4053 return result; 4054 } 4055 4056 4057 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args, 4058 address entry, ValueType* result_type, CodeEmitInfo* info) { 4059 // get a result register 4060 LIR_Opr phys_reg = LIR_OprFact::illegalOpr; 4061 LIR_Opr result = LIR_OprFact::illegalOpr; 4062 if (result_type->tag() != voidTag) { 4063 result = new_register(result_type); 4064 phys_reg = result_register_for(result_type); 4065 } 4066 4067 // move the arguments into the correct location 4068 CallingConvention* cc = frame_map()->c_calling_convention(signature); 4069 4070 assert(cc->length() == args->length(), "argument mismatch"); 4071 for (int i = 0; i < args->length(); i++) { 4072 LIRItem* arg = args->at(i); 4073 LIR_Opr loc = cc->at(i); 4074 if (loc->is_register()) { 4075 arg->load_item_force(loc); 4076 } else { 4077 LIR_Address* addr = loc->as_address_ptr(); 4078 arg->load_for_store(addr->type()); 4079 __ move(arg->result(), addr); 4080 } 4081 } 4082 4083 if (info) { 4084 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info); 4085 } else { 4086 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args()); 4087 } 4088 if (result->is_valid()) { 4089 __ move(phys_reg, result); 4090 } 4091 return result; 4092 } 4093 4094 void LIRGenerator::do_MemBar(MemBar* x) { 4095 LIR_Code code = x->code(); 4096 switch(code) { 
4097 case lir_membar_acquire : __ membar_acquire(); break; 4098 case lir_membar_release : __ membar_release(); break; 4099 case lir_membar : __ membar(); break; 4100 case lir_membar_loadload : __ membar_loadload(); break; 4101 case lir_membar_storestore: __ membar_storestore(); break; 4102 case lir_membar_loadstore : __ membar_loadstore(); break; 4103 case lir_membar_storeload : __ membar_storeload(); break; 4104 default : ShouldNotReachHere(); break; 4105 } 4106 } 4107 4108 LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) { 4109 LIR_Opr value_fixed = rlock_byte(T_BYTE); 4110 if (two_operand_lir_form) { 4111 __ move(value, value_fixed); 4112 __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed); 4113 } else { 4114 __ logical_and(value, LIR_OprFact::intConst(1), value_fixed); 4115 } 4116 LIR_Opr klass = new_register(T_METADATA); 4117 load_klass(array, klass, null_check_info); 4118 null_check_info = NULL; 4119 LIR_Opr layout = new_register(T_INT); 4120 __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout); 4121 int diffbit = Klass::layout_helper_boolean_diffbit(); 4122 __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout); 4123 __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0)); 4124 __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE); 4125 value = value_fixed; 4126 return value; 4127 }
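
// An illustrative reading of mask_boolean() above: the diffbit is the single
// bit in which the layout helpers of boolean[] and byte[] differ, so when
// (layout & diffbit) != 0 the array is a boolean[] and the cmove keeps the
// masked value (value & 1); otherwise the original byte value is stored
// unchanged.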