1 /* 2 * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2016, 2024 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "asm/macroAssembler.inline.hpp" 27 #include "c1/c1_Compilation.hpp" 28 #include "c1/c1_LIRAssembler.hpp" 29 #include "c1/c1_MacroAssembler.hpp" 30 #include "c1/c1_Runtime1.hpp" 31 #include "c1/c1_ValueStack.hpp" 32 #include "ci/ciArrayKlass.hpp" 33 #include "ci/ciInstance.hpp" 34 #include "gc/shared/collectedHeap.hpp" 35 #include "memory/universe.hpp" 36 #include "nativeInst_s390.hpp" 37 #include "oops/objArrayKlass.hpp" 38 #include "runtime/frame.inline.hpp" 39 #include "runtime/safepointMechanism.inline.hpp" 40 #include "runtime/sharedRuntime.hpp" 41 #include "runtime/stubRoutines.hpp" 42 #include "utilities/macros.hpp" 43 #include "utilities/powerOfTwo.hpp" 44 #include "vmreg_s390.inline.hpp" 45 46 #define __ _masm-> 47 48 #ifndef PRODUCT 49 #undef __ 50 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm) : _masm)-> 51 #endif 52 53 //------------------------------------------------------------ 54 55 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { 56 // Not used on ZARCH_64 57 ShouldNotCallThis(); 58 return false; 59 } 60 61 LIR_Opr LIR_Assembler::receiverOpr() { 62 return FrameMap::Z_R2_oop_opr; 63 } 64 65 LIR_Opr LIR_Assembler::osrBufferPointer() { 66 return FrameMap::Z_R2_opr; 67 } 68 69 int LIR_Assembler::initial_frame_size_in_bytes() const { 70 return in_bytes(frame_map()->framesize_in_bytes()); 71 } 72 73 // Inline cache check: done before the frame is built. 74 // The inline cached class is in Z_inline_cache(Z_R9). 75 // We fetch the class of the receiver and compare it with the cached class. 76 // If they do not match we jump to the slow case. 77 int LIR_Assembler::check_icache() { 78 return __ ic_check(CodeEntryAlignment); 79 } 80 81 void LIR_Assembler::clinit_barrier(ciMethod* method) { 82 assert(!method->holder()->is_not_initialized(), "initialization should have been started"); 83 84 Label L_skip_barrier; 85 Register klass = Z_R1_scratch; 86 87 metadata2reg(method->holder()->constant_encoding(), klass); 88 __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/); 89 90 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub()); 91 __ z_br(klass); 92 93 __ bind(L_skip_barrier); 94 } 95 96 void LIR_Assembler::osr_entry() { 97 // On-stack-replacement entry sequence (interpreter frame layout described in frame_s390.hpp): 98 // 99 // 1. Create a new compiled activation. 100 // 2. 
Initialize local variables in the compiled activation. The expression stack must be empty 101 // at the osr_bci; it is not initialized. 102 // 3. Jump to the continuation address in compiled code to resume execution. 103 104 // OSR entry point 105 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); 106 BlockBegin* osr_entry = compilation()->hir()->osr_entry(); 107 ValueStack* entry_state = osr_entry->end()->state(); 108 int number_of_locks = entry_state->locks_size(); 109 110 // Create a frame for the compiled activation. 111 __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes()); 112 113 // OSR buffer is 114 // 115 // locals[nlocals-1..0] 116 // monitors[number_of_locks-1..0] 117 // 118 // Locals is a direct copy of the interpreter frame so in the osr buffer 119 // the first slot in the local array is the last local from the interpreter 120 // and the last slot is local[0] (receiver) from the interpreter 121 // 122 // Similarly with locks. The first lock slot in the osr buffer is the nth lock 123 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock 124 // in the interpreter frame (the method lock if a sync method) 125 126 // Initialize monitors in the compiled activation. 127 // I0: pointer to osr buffer 128 // 129 // All other registers are dead at this point and the locals will be 130 // copied into place by code emitted in the IR. 131 132 Register OSR_buf = osrBufferPointer()->as_register(); 133 { 134 assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); 135 136 const int locals_space = BytesPerWord * method() -> max_locals(); 137 int monitor_offset = locals_space + (2 * BytesPerWord) * (number_of_locks - 1); 138 bool large_offset = !Immediate::is_simm20(monitor_offset + BytesPerWord) && number_of_locks > 0; 139 140 if (large_offset) { 141 // z_lg can only handle displacement upto 20bit signed binary integer 142 __ z_algfi(OSR_buf, locals_space); 143 monitor_offset -= locals_space; 144 } 145 146 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in 147 // the OSR buffer using 2 word entries: first the lock and then 148 // the oop. 149 for (int i = 0; i < number_of_locks; i++) { 150 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); 151 // Verify the interpreter's monitor has a non-null object. 152 __ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is null", __LINE__); 153 // Copy the lock field into the compiled activation. 154 __ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf); 155 __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i)); 156 __ z_lg(Z_R1_scratch, slot_offset + 1*BytesPerWord, OSR_buf); 157 __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_object(i)); 158 } 159 160 if (large_offset) { 161 __ z_slgfi(OSR_buf, locals_space); 162 } 163 } 164 } 165 166 // -------------------------------------------------------------------------------------------- 167 168 address LIR_Assembler::emit_call_c(address a) { 169 __ align_call_far_patchable(__ pc()); 170 address call_addr = __ call_c_opt(a); 171 if (call_addr == nullptr) { 172 bailout("const section overflow"); 173 } 174 return call_addr; 175 } 176 177 int LIR_Assembler::emit_exception_handler() { 178 // Generate code for exception handler. 179 address handler_base = __ start_a_stub(exception_handler_size()); 180 if (handler_base == nullptr) { 181 // Not enough space left for the handler. 
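// Note: bailout() abandons the current C1 compilation attempt; execution simply
// continues in the interpreter, and the method may be compiled again later.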
182 bailout("exception handler overflow"); 183 return -1; 184 } 185 186 int offset = code_offset(); 187 188 address a = Runtime1::entry_for (C1StubId::handle_exception_from_callee_id); 189 address call_addr = emit_call_c(a); 190 CHECK_BAILOUT_(-1); 191 __ should_not_reach_here(); 192 guarantee(code_offset() - offset <= exception_handler_size(), "overflow"); 193 __ end_a_stub(); 194 195 return offset; 196 } 197 198 // Emit the code to remove the frame from the stack in the exception 199 // unwind path. 200 int LIR_Assembler::emit_unwind_handler() { 201 #ifndef PRODUCT 202 if (CommentedAssembly) { 203 _masm->block_comment("Unwind handler"); 204 } 205 #endif 206 207 int offset = code_offset(); 208 Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved. 209 Register Rtmp1 = Z_R11; 210 Register Rtmp2 = Z_R12; 211 212 // Fetch the exception from TLS and clear out exception related thread state. 213 Address exc_oop_addr = Address(Z_thread, JavaThread::exception_oop_offset()); 214 Address exc_pc_addr = Address(Z_thread, JavaThread::exception_pc_offset()); 215 __ z_lg(Z_EXC_OOP, exc_oop_addr); 216 __ clear_mem(exc_oop_addr, sizeof(oop)); 217 __ clear_mem(exc_pc_addr, sizeof(intptr_t)); 218 219 __ bind(_unwind_handler_entry); 220 __ verify_not_null_oop(Z_EXC_OOP); 221 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 222 __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP); // Preserve the exception. 223 } 224 225 // Perform needed unlocking. 226 MonitorExitStub* stub = nullptr; 227 if (method()->is_synchronized()) { 228 // C1StubId::monitorexit_id expects lock address in Z_R1_scratch. 229 LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch); 230 monitor_address(0, lock); 231 stub = new MonitorExitStub(lock, true, 0); 232 if (LockingMode == LM_MONITOR) { 233 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 234 } else { 235 __ unlock_object(Rtmp1, Rtmp2, lock->as_register(), *stub->entry()); 236 } 237 __ bind(*stub->continuation()); 238 } 239 240 if (compilation()->env()->dtrace_method_probes()) { 241 ShouldNotReachHere(); // Not supported. 242 #if 0 243 __ mov(rdi, r15_thread); 244 __ mov_metadata(rsi, method()->constant_encoding()); 245 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit))); 246 #endif 247 } 248 249 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 250 __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved); // Restore the exception. 251 } 252 253 // Remove the activation and dispatch to the unwind handler. 254 __ pop_frame(); 255 __ z_lg(Z_EXC_PC, _z_common_abi(return_pc), Z_SP); 256 257 // Z_EXC_OOP: exception oop 258 // Z_EXC_PC: exception pc 259 260 // Dispatch to the unwind logic. 261 __ load_const_optimized(Z_R5, Runtime1::entry_for (C1StubId::unwind_exception_id)); 262 __ z_br(Z_R5); 263 264 // Emit the slow path assembly. 265 if (stub != nullptr) { 266 stub->emit_code(this); 267 } 268 269 return offset; 270 } 271 272 int LIR_Assembler::emit_deopt_handler() { 273 // Generate code for exception handler. 274 address handler_base = __ start_a_stub(deopt_handler_size()); 275 if (handler_base == nullptr) { 276 // Not enough space left for the handler. 277 bailout("deopt handler overflow"); 278 return -1; 279 } int offset = code_offset(); 280 // Size must be constant (see HandlerImpl::emit_deopt_handler). 
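// Note: load_const (rather than load_const_optimized) is used below so the emitted
// sequence always has the same length; the guarantee further down relies on the
// handler fitting into the fixed-size deopt_handler_size() stub.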
281 __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack()); 282 __ call(Z_R1_scratch); 283 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow"); 284 __ end_a_stub(); 285 286 return offset; 287 } 288 289 void LIR_Assembler::jobject2reg(jobject o, Register reg) { 290 if (o == nullptr) { 291 __ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove. 292 } else { 293 AddressLiteral a = __ allocate_oop_address(o); 294 bool success = __ load_oop_from_toc(reg, a, reg); 295 if (!success) { 296 bailout("const section overflow"); 297 } 298 } 299 } 300 301 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { 302 // Allocate a new index in table to hold the object once it's been patched. 303 int oop_index = __ oop_recorder()->allocate_oop_index(nullptr); 304 PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index); 305 306 AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index)); 307 assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); 308 // The null will be dynamically patched later so the sequence to 309 // load the address literal must not be optimized. 310 __ load_const(reg, addrlit); 311 312 patching_epilog(patch, lir_patch_normal, reg, info); 313 } 314 315 void LIR_Assembler::metadata2reg(Metadata* md, Register reg) { 316 bool success = __ set_metadata_constant(md, reg); 317 if (!success) { 318 bailout("const section overflow"); 319 return; 320 } 321 } 322 323 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) { 324 // Allocate a new index in table to hold the klass once it's been patched. 325 int index = __ oop_recorder()->allocate_metadata_index(nullptr); 326 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index); 327 AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index)); 328 assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be an metadata reloc"); 329 // The null will be dynamically patched later so the sequence to 330 // load the address literal must not be optimized. 
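// Note: load_const always emits the full-length constant-load sequence, so the
// PatchingStub can later overwrite the embedded value with the resolved Klass*.
// An optimized load could pick a shorter encoding that the patching code does
// not expect.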
331 __ load_const(reg, addrlit); 332 333 patching_epilog(patch, lir_patch_normal, reg, info); 334 } 335 336 void LIR_Assembler::emit_op3(LIR_Op3* op) { 337 switch (op->code()) { 338 case lir_idiv: 339 case lir_irem: 340 arithmetic_idiv(op->code(), 341 op->in_opr1(), 342 op->in_opr2(), 343 op->in_opr3(), 344 op->result_opr(), 345 op->info()); 346 break; 347 case lir_fmad: { 348 const FloatRegister opr1 = op->in_opr1()->as_double_reg(), 349 opr2 = op->in_opr2()->as_double_reg(), 350 opr3 = op->in_opr3()->as_double_reg(), 351 res = op->result_opr()->as_double_reg(); 352 __ z_madbr(opr3, opr1, opr2); 353 if (res != opr3) { __ z_ldr(res, opr3); } 354 } break; 355 case lir_fmaf: { 356 const FloatRegister opr1 = op->in_opr1()->as_float_reg(), 357 opr2 = op->in_opr2()->as_float_reg(), 358 opr3 = op->in_opr3()->as_float_reg(), 359 res = op->result_opr()->as_float_reg(); 360 __ z_maebr(opr3, opr1, opr2); 361 if (res != opr3) { __ z_ler(res, opr3); } 362 } break; 363 default: ShouldNotReachHere(); break; 364 } 365 } 366 367 368 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { 369 #ifdef ASSERT 370 assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label"); 371 if (op->block() != nullptr) { _branch_target_blocks.append(op->block()); } 372 if (op->ublock() != nullptr) { _branch_target_blocks.append(op->ublock()); } 373 #endif 374 375 if (op->cond() == lir_cond_always) { 376 if (op->info() != nullptr) { add_debug_info_for_branch(op->info()); } 377 __ branch_optimized(Assembler::bcondAlways, *(op->label())); 378 } else { 379 Assembler::branch_condition acond = Assembler::bcondZero; 380 if (op->code() == lir_cond_float_branch) { 381 assert(op->ublock() != nullptr, "must have unordered successor"); 382 __ branch_optimized(Assembler::bcondNotOrdered, *(op->ublock()->label())); 383 } 384 switch (op->cond()) { 385 case lir_cond_equal: acond = Assembler::bcondEqual; break; 386 case lir_cond_notEqual: acond = Assembler::bcondNotEqual; break; 387 case lir_cond_less: acond = Assembler::bcondLow; break; 388 case lir_cond_lessEqual: acond = Assembler::bcondNotHigh; break; 389 case lir_cond_greaterEqual: acond = Assembler::bcondNotLow; break; 390 case lir_cond_greater: acond = Assembler::bcondHigh; break; 391 case lir_cond_belowEqual: acond = Assembler::bcondNotHigh; break; 392 case lir_cond_aboveEqual: acond = Assembler::bcondNotLow; break; 393 default: ShouldNotReachHere(); 394 } 395 __ branch_optimized(acond,*(op->label())); 396 } 397 } 398 399 400 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { 401 LIR_Opr src = op->in_opr(); 402 LIR_Opr dest = op->result_opr(); 403 404 switch (op->bytecode()) { 405 case Bytecodes::_i2l: 406 __ move_reg_if_needed(dest->as_register_lo(), T_LONG, src->as_register(), T_INT); 407 break; 408 409 case Bytecodes::_l2i: 410 __ move_reg_if_needed(dest->as_register(), T_INT, src->as_register_lo(), T_LONG); 411 break; 412 413 case Bytecodes::_i2b: 414 __ move_reg_if_needed(dest->as_register(), T_BYTE, src->as_register(), T_INT); 415 break; 416 417 case Bytecodes::_i2c: 418 __ move_reg_if_needed(dest->as_register(), T_CHAR, src->as_register(), T_INT); 419 break; 420 421 case Bytecodes::_i2s: 422 __ move_reg_if_needed(dest->as_register(), T_SHORT, src->as_register(), T_INT); 423 break; 424 425 case Bytecodes::_f2d: 426 assert(dest->is_double_fpu(), "check"); 427 __ move_freg_if_needed(dest->as_double_reg(), T_DOUBLE, src->as_float_reg(), T_FLOAT); 428 break; 429 430 case Bytecodes::_d2f: 431 assert(dest->is_single_fpu(), "check"); 432 __ 
move_freg_if_needed(dest->as_float_reg(), T_FLOAT, src->as_double_reg(), T_DOUBLE); 433 break; 434 435 case Bytecodes::_i2f: 436 __ z_cefbr(dest->as_float_reg(), src->as_register()); 437 break; 438 439 case Bytecodes::_i2d: 440 __ z_cdfbr(dest->as_double_reg(), src->as_register()); 441 break; 442 443 case Bytecodes::_l2f: 444 __ z_cegbr(dest->as_float_reg(), src->as_register_lo()); 445 break; 446 case Bytecodes::_l2d: 447 __ z_cdgbr(dest->as_double_reg(), src->as_register_lo()); 448 break; 449 450 case Bytecodes::_f2i: 451 case Bytecodes::_f2l: { 452 Label done; 453 FloatRegister Rsrc = src->as_float_reg(); 454 Register Rdst = (op->bytecode() == Bytecodes::_f2i ? dest->as_register() : dest->as_register_lo()); 455 __ clear_reg(Rdst, true, false); 456 __ z_cebr(Rsrc, Rsrc); 457 __ z_brno(done); // NaN -> 0 458 if (op->bytecode() == Bytecodes::_f2i) { 459 __ z_cfebr(Rdst, Rsrc, Assembler::to_zero); 460 } else { // op->bytecode() == Bytecodes::_f2l 461 __ z_cgebr(Rdst, Rsrc, Assembler::to_zero); 462 } 463 __ bind(done); 464 } 465 break; 466 467 case Bytecodes::_d2i: 468 case Bytecodes::_d2l: { 469 Label done; 470 FloatRegister Rsrc = src->as_double_reg(); 471 Register Rdst = (op->bytecode() == Bytecodes::_d2i ? dest->as_register() : dest->as_register_lo()); 472 __ clear_reg(Rdst, true, false); // Don't set CC. 473 __ z_cdbr(Rsrc, Rsrc); 474 __ z_brno(done); // NaN -> 0 475 if (op->bytecode() == Bytecodes::_d2i) { 476 __ z_cfdbr(Rdst, Rsrc, Assembler::to_zero); 477 } else { // Bytecodes::_d2l 478 __ z_cgdbr(Rdst, Rsrc, Assembler::to_zero); 479 } 480 __ bind(done); 481 } 482 break; 483 484 default: ShouldNotReachHere(); 485 } 486 } 487 488 void LIR_Assembler::align_call(LIR_Code code) { 489 // End of call instruction must be 4 byte aligned. 490 int offset = __ offset(); 491 switch (code) { 492 case lir_icvirtual_call: 493 offset += MacroAssembler::load_const_from_toc_size(); 494 // no break 495 case lir_static_call: 496 case lir_optvirtual_call: 497 case lir_dynamic_call: 498 offset += NativeCall::call_far_pcrelative_displacement_offset; 499 break; 500 default: ShouldNotReachHere(); 501 } 502 if ((offset & (NativeCall::call_far_pcrelative_displacement_alignment-1)) != 0) { 503 __ nop(); 504 } 505 } 506 507 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 508 assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0, 509 "must be aligned (offset=%d)", __ offset()); 510 assert(rtype == relocInfo::none || 511 rtype == relocInfo::opt_virtual_call_type || 512 rtype == relocInfo::static_call_type, "unexpected rtype"); 513 // Prepend each BRASL with a nop. 514 __ relocate(rtype); 515 __ z_nop(); 516 __ z_brasl(Z_R14, op->addr()); 517 add_call_info(code_offset(), op->info()); 518 } 519 520 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 521 address virtual_call_oop_addr = nullptr; 522 AddressLiteral empty_ic((address) Universe::non_oop_word()); 523 virtual_call_oop_addr = __ pc(); 524 bool success = __ load_const_from_toc(Z_inline_cache, empty_ic); 525 if (!success) { 526 bailout("const section overflow"); 527 return; 528 } 529 530 // CALL to fixup routine. Fixup routine uses ScopeDesc info 531 // to determine who we intended to call. 
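// Note: the relocation below records virtual_call_oop_addr, i.e. the start of the
// inline-cache constant load above, so the runtime can locate and patch the IC
// value once the callee has been resolved.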
532 __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr)); 533 call(op, relocInfo::none); 534 } 535 536 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { 537 if (from_reg != to_reg) __ z_lgr(to_reg, from_reg); 538 } 539 540 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 541 assert(src->is_constant(), "should not call otherwise"); 542 assert(dest->is_stack(), "should not call otherwise"); 543 LIR_Const* c = src->as_constant_ptr(); 544 545 unsigned int lmem = 0; 546 unsigned int lcon = 0; 547 int64_t cbits = 0; 548 Address dest_addr; 549 switch (c->type()) { 550 case T_INT: // fall through 551 case T_FLOAT: 552 dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 553 lmem = 4; lcon = 4; cbits = c->as_jint_bits(); 554 break; 555 556 case T_ADDRESS: 557 dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 558 lmem = 8; lcon = 4; cbits = c->as_jint_bits(); 559 break; 560 561 case T_OBJECT: 562 dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 563 if (c->as_jobject() == nullptr) { 564 __ store_const(dest_addr, (int64_t)NULL_WORD, 8, 8); 565 } else { 566 jobject2reg(c->as_jobject(), Z_R1_scratch); 567 __ reg2mem_opt(Z_R1_scratch, dest_addr, true); 568 } 569 return; 570 571 case T_LONG: // fall through 572 case T_DOUBLE: 573 dest_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 574 lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits()); 575 break; 576 577 default: 578 ShouldNotReachHere(); 579 } 580 581 __ store_const(dest_addr, cbits, lmem, lcon); 582 } 583 584 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 585 assert(src->is_constant(), "should not call otherwise"); 586 assert(dest->is_address(), "should not call otherwise"); 587 588 LIR_Const* c = src->as_constant_ptr(); 589 Address addr = as_Address(dest->as_address_ptr()); 590 591 int store_offset = -1; 592 593 if (dest->as_address_ptr()->index()->is_valid()) { 594 switch (type) { 595 case T_INT: // fall through 596 case T_FLOAT: 597 __ load_const_optimized(Z_R0_scratch, c->as_jint_bits()); 598 store_offset = __ offset(); 599 if (Immediate::is_uimm12(addr.disp())) { 600 __ z_st(Z_R0_scratch, addr); 601 } else { 602 __ z_sty(Z_R0_scratch, addr); 603 } 604 break; 605 606 case T_ADDRESS: 607 __ load_const_optimized(Z_R1_scratch, c->as_jint_bits()); 608 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 609 break; 610 611 case T_OBJECT: // fall through 612 case T_ARRAY: 613 if (c->as_jobject() == nullptr) { 614 if (UseCompressedOops && !wide) { 615 __ clear_reg(Z_R1_scratch, false); 616 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false); 617 } else { 618 __ clear_reg(Z_R1_scratch, true); 619 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 620 } 621 } else { 622 jobject2reg(c->as_jobject(), Z_R1_scratch); 623 if (UseCompressedOops && !wide) { 624 __ encode_heap_oop(Z_R1_scratch); 625 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false); 626 } else { 627 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 628 } 629 } 630 assert(store_offset >= 0, "check"); 631 break; 632 633 case T_LONG: // fall through 634 case T_DOUBLE: 635 __ load_const_optimized(Z_R1_scratch, (int64_t)(c->as_jlong_bits())); 636 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 637 break; 638 639 case T_BOOLEAN: // fall through 640 case T_BYTE: 641 __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint())); 642 store_offset = __ offset(); 643 if 
(Immediate::is_uimm12(addr.disp())) { 644 __ z_stc(Z_R0_scratch, addr); 645 } else { 646 __ z_stcy(Z_R0_scratch, addr); 647 } 648 break; 649 650 case T_CHAR: // fall through 651 case T_SHORT: 652 __ load_const_optimized(Z_R0_scratch, (int16_t)(c->as_jint())); 653 store_offset = __ offset(); 654 if (Immediate::is_uimm12(addr.disp())) { 655 __ z_sth(Z_R0_scratch, addr); 656 } else { 657 __ z_sthy(Z_R0_scratch, addr); 658 } 659 break; 660 661 default: 662 ShouldNotReachHere(); 663 } 664 665 } else { // no index 666 667 unsigned int lmem = 0; 668 unsigned int lcon = 0; 669 int64_t cbits = 0; 670 671 switch (type) { 672 case T_INT: // fall through 673 case T_FLOAT: 674 lmem = 4; lcon = 4; cbits = c->as_jint_bits(); 675 break; 676 677 case T_ADDRESS: 678 lmem = 8; lcon = 4; cbits = c->as_jint_bits(); 679 break; 680 681 case T_OBJECT: // fall through 682 case T_ARRAY: 683 if (c->as_jobject() == nullptr) { 684 if (UseCompressedOops && !wide) { 685 store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4); 686 } else { 687 store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8); 688 } 689 } else { 690 jobject2reg(c->as_jobject(), Z_R1_scratch); 691 if (UseCompressedOops && !wide) { 692 __ encode_heap_oop(Z_R1_scratch); 693 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false); 694 } else { 695 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 696 } 697 } 698 assert(store_offset >= 0, "check"); 699 break; 700 701 case T_LONG: // fall through 702 case T_DOUBLE: 703 lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits()); 704 break; 705 706 case T_BOOLEAN: // fall through 707 case T_BYTE: 708 lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint()); 709 break; 710 711 case T_CHAR: // fall through 712 case T_SHORT: 713 lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint()); 714 break; 715 716 default: 717 ShouldNotReachHere(); 718 } 719 720 if (store_offset == -1) { 721 store_offset = __ store_const(addr, cbits, lmem, lcon); 722 assert(store_offset >= 0, "check"); 723 } 724 } 725 726 if (info != nullptr) { 727 add_debug_info_for_null_check(store_offset, info); 728 } 729 } 730 731 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 732 assert(src->is_constant(), "should not call otherwise"); 733 assert(dest->is_register(), "should not call otherwise"); 734 LIR_Const* c = src->as_constant_ptr(); 735 736 switch (c->type()) { 737 case T_INT: { 738 assert(patch_code == lir_patch_none, "no patching handled here"); 739 __ load_const_optimized(dest->as_register(), c->as_jint()); 740 break; 741 } 742 743 case T_ADDRESS: { 744 assert(patch_code == lir_patch_none, "no patching handled here"); 745 __ load_const_optimized(dest->as_register(), c->as_jint()); 746 break; 747 } 748 749 case T_LONG: { 750 assert(patch_code == lir_patch_none, "no patching handled here"); 751 __ load_const_optimized(dest->as_register_lo(), (intptr_t)c->as_jlong()); 752 break; 753 } 754 755 case T_OBJECT: { 756 if (patch_code != lir_patch_none) { 757 jobject2reg_with_patching(dest->as_register(), info); 758 } else { 759 jobject2reg(c->as_jobject(), dest->as_register()); 760 } 761 break; 762 } 763 764 case T_METADATA: { 765 if (patch_code != lir_patch_none) { 766 klass2reg_with_patching(dest->as_register(), info); 767 } else { 768 metadata2reg(c->as_metadata(), dest->as_register()); 769 } 770 break; 771 } 772 773 case T_FLOAT: { 774 Register toc_reg = Z_R1_scratch; 775 __ load_toc(toc_reg); 776 address const_addr = __ float_constant(c->as_jfloat()); 777 if (const_addr == 
nullptr) { 778 bailout("const section overflow"); 779 break; 780 } 781 int displ = const_addr - _masm->code()->consts()->start(); 782 if (dest->is_single_fpu()) { 783 __ z_ley(dest->as_float_reg(), displ, toc_reg); 784 } else { 785 assert(dest->is_single_cpu(), "Must be a cpu register."); 786 __ z_ly(dest->as_register(), displ, toc_reg); 787 } 788 } 789 break; 790 791 case T_DOUBLE: { 792 Register toc_reg = Z_R1_scratch; 793 __ load_toc(toc_reg); 794 address const_addr = __ double_constant(c->as_jdouble()); 795 if (const_addr == nullptr) { 796 bailout("const section overflow"); 797 break; 798 } 799 int displ = const_addr - _masm->code()->consts()->start(); 800 if (dest->is_double_fpu()) { 801 __ z_ldy(dest->as_double_reg(), displ, toc_reg); 802 } else { 803 assert(dest->is_double_cpu(), "Must be a long register."); 804 __ z_lg(dest->as_register_lo(), displ, toc_reg); 805 } 806 } 807 break; 808 809 default: 810 ShouldNotReachHere(); 811 } 812 } 813 814 Address LIR_Assembler::as_Address(LIR_Address* addr) { 815 if (addr->base()->is_illegal()) { 816 Unimplemented(); 817 } 818 819 Register base = addr->base()->as_pointer_register(); 820 821 if (addr->index()->is_illegal()) { 822 return Address(base, addr->disp()); 823 } else if (addr->index()->is_cpu_register()) { 824 Register index = addr->index()->as_pointer_register(); 825 return Address(base, index, addr->disp()); 826 } else if (addr->index()->is_constant()) { 827 intptr_t addr_offset = addr->index()->as_constant_ptr()->as_jint() + addr->disp(); 828 return Address(base, addr_offset); 829 } else { 830 ShouldNotReachHere(); 831 return Address(); 832 } 833 } 834 835 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { 836 switch (type) { 837 case T_INT: 838 case T_FLOAT: { 839 Register tmp = Z_R1_scratch; 840 Address from = frame_map()->address_for_slot(src->single_stack_ix()); 841 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); 842 __ mem2reg_opt(tmp, from, false); 843 __ reg2mem_opt(tmp, to, false); 844 break; 845 } 846 case T_ADDRESS: 847 case T_OBJECT: { 848 Register tmp = Z_R1_scratch; 849 Address from = frame_map()->address_for_slot(src->single_stack_ix()); 850 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); 851 __ mem2reg_opt(tmp, from, true); 852 __ reg2mem_opt(tmp, to, true); 853 break; 854 } 855 case T_LONG: 856 case T_DOUBLE: { 857 Register tmp = Z_R1_scratch; 858 Address from = frame_map()->address_for_double_slot(src->double_stack_ix()); 859 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix()); 860 __ mem2reg_opt(tmp, from, true); 861 __ reg2mem_opt(tmp, to, true); 862 break; 863 } 864 865 default: 866 ShouldNotReachHere(); 867 } 868 } 869 870 // 4-byte accesses only! Don't use it to access 8 bytes! 871 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { 872 ShouldNotCallThis(); 873 return Address(); // unused 874 } 875 876 // 4-byte accesses only! Don't use it to access 8 bytes! 
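// Note: this port is 64-bit only, so LIR never splits a two-word operand into
// separate hi/lo memory accesses; both of these accessors are therefore unreachable.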
877 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { 878 ShouldNotCallThis(); 879 return Address(); // unused 880 } 881 882 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, 883 CodeEmitInfo* info, bool wide) { 884 885 assert(type != T_METADATA, "load of metadata ptr not supported"); 886 LIR_Address* addr = src_opr->as_address_ptr(); 887 LIR_Opr to_reg = dest; 888 889 Register src = addr->base()->as_pointer_register(); 890 Register disp_reg = Z_R0; 891 int disp_value = addr->disp(); 892 bool needs_patching = (patch_code != lir_patch_none); 893 894 if (addr->base()->type() == T_OBJECT) { 895 __ verify_oop(src, FILE_AND_LINE); 896 } 897 898 PatchingStub* patch = nullptr; 899 if (needs_patching) { 900 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 901 assert(!to_reg->is_double_cpu() || 902 patch_code == lir_patch_none || 903 patch_code == lir_patch_normal, "patching doesn't match register"); 904 } 905 906 if (addr->index()->is_illegal()) { 907 if (!Immediate::is_simm20(disp_value)) { 908 if (needs_patching) { 909 __ load_const(Z_R1_scratch, (intptr_t)0); 910 } else { 911 __ load_const_optimized(Z_R1_scratch, disp_value); 912 } 913 disp_reg = Z_R1_scratch; 914 disp_value = 0; 915 } 916 } else { 917 if (!Immediate::is_simm20(disp_value)) { 918 __ load_const_optimized(Z_R1_scratch, disp_value); 919 __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register()); 920 disp_reg = Z_R1_scratch; 921 disp_value = 0; 922 } 923 disp_reg = addr->index()->as_pointer_register(); 924 } 925 926 // Remember the offset of the load. The patching_epilog must be done 927 // before the call to add_debug_info, otherwise the PcDescs don't get 928 // entered in increasing order. 929 int offset = code_offset(); 930 931 assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up"); 932 933 bool short_disp = Immediate::is_uimm12(disp_value); 934 935 switch (type) { 936 case T_BOOLEAN: // fall through 937 case T_BYTE : __ z_lb(dest->as_register(), disp_value, disp_reg, src); break; 938 case T_CHAR : __ z_llgh(dest->as_register(), disp_value, disp_reg, src); break; 939 case T_SHORT : 940 if (short_disp) { 941 __ z_lh(dest->as_register(), disp_value, disp_reg, src); 942 } else { 943 __ z_lhy(dest->as_register(), disp_value, disp_reg, src); 944 } 945 break; 946 case T_INT : 947 if (short_disp) { 948 __ z_l(dest->as_register(), disp_value, disp_reg, src); 949 } else { 950 __ z_ly(dest->as_register(), disp_value, disp_reg, src); 951 } 952 break; 953 case T_ADDRESS: 954 __ z_lg(dest->as_register(), disp_value, disp_reg, src); 955 break; 956 case T_ARRAY : // fall through 957 case T_OBJECT: 958 { 959 if (UseCompressedOops && !wide) { 960 __ z_llgf(dest->as_register(), disp_value, disp_reg, src); 961 __ oop_decoder(dest->as_register(), dest->as_register(), true); 962 } else { 963 __ z_lg(dest->as_register(), disp_value, disp_reg, src); 964 } 965 __ verify_oop(dest->as_register(), FILE_AND_LINE); 966 break; 967 } 968 case T_FLOAT: 969 if (short_disp) { 970 __ z_le(dest->as_float_reg(), disp_value, disp_reg, src); 971 } else { 972 __ z_ley(dest->as_float_reg(), disp_value, disp_reg, src); 973 } 974 break; 975 case T_DOUBLE: 976 if (short_disp) { 977 __ z_ld(dest->as_double_reg(), disp_value, disp_reg, src); 978 } else { 979 __ z_ldy(dest->as_double_reg(), disp_value, disp_reg, src); 980 } 981 break; 982 case T_LONG : __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break; 983 default : ShouldNotReachHere(); 984 } 
985 986 if (patch != nullptr) { 987 patching_epilog(patch, patch_code, src, info); 988 } 989 if (info != nullptr) add_debug_info_for_null_check(offset, info); 990 } 991 992 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 993 assert(src->is_stack(), "should not call otherwise"); 994 assert(dest->is_register(), "should not call otherwise"); 995 996 if (dest->is_single_cpu()) { 997 if (is_reference_type(type)) { 998 __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true); 999 __ verify_oop(dest->as_register(), FILE_AND_LINE); 1000 } else if (type == T_METADATA || type == T_ADDRESS) { 1001 __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true); 1002 } else { 1003 __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), false); 1004 } 1005 } else if (dest->is_double_cpu()) { 1006 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix()); 1007 __ mem2reg_opt(dest->as_register_lo(), src_addr_LO, true); 1008 } else if (dest->is_single_fpu()) { 1009 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); 1010 __ mem2freg_opt(dest->as_float_reg(), src_addr, false); 1011 } else if (dest->is_double_fpu()) { 1012 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); 1013 __ mem2freg_opt(dest->as_double_reg(), src_addr, true); 1014 } else { 1015 ShouldNotReachHere(); 1016 } 1017 } 1018 1019 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { 1020 assert(src->is_register(), "should not call otherwise"); 1021 assert(dest->is_stack(), "should not call otherwise"); 1022 1023 if (src->is_single_cpu()) { 1024 const Address dst = frame_map()->address_for_slot(dest->single_stack_ix()); 1025 if (is_reference_type(type)) { 1026 __ verify_oop(src->as_register(), FILE_AND_LINE); 1027 __ reg2mem_opt(src->as_register(), dst, true); 1028 } else if (type == T_METADATA || type == T_ADDRESS) { 1029 __ reg2mem_opt(src->as_register(), dst, true); 1030 } else { 1031 __ reg2mem_opt(src->as_register(), dst, false); 1032 } 1033 } else if (src->is_double_cpu()) { 1034 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix()); 1035 __ reg2mem_opt(src->as_register_lo(), dstLO, true); 1036 } else if (src->is_single_fpu()) { 1037 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1038 __ freg2mem_opt(src->as_float_reg(), dst_addr, false); 1039 } else if (src->is_double_fpu()) { 1040 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 1041 __ freg2mem_opt(src->as_double_reg(), dst_addr, true); 1042 } else { 1043 ShouldNotReachHere(); 1044 } 1045 } 1046 1047 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) { 1048 if (from_reg->is_float_kind() && to_reg->is_float_kind()) { 1049 if (from_reg->is_double_fpu()) { 1050 // double to double moves 1051 assert(to_reg->is_double_fpu(), "should match"); 1052 __ z_ldr(to_reg->as_double_reg(), from_reg->as_double_reg()); 1053 } else { 1054 // float to float moves 1055 assert(to_reg->is_single_fpu(), "should match"); 1056 __ z_ler(to_reg->as_float_reg(), from_reg->as_float_reg()); 1057 } 1058 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) { 1059 if (from_reg->is_double_cpu()) { 1060 __ z_lgr(to_reg->as_pointer_register(), from_reg->as_pointer_register()); 1061 } else if (to_reg->is_double_cpu()) { 1062 // int to int moves 1063 __ z_lgr(to_reg->as_register_lo(), 
from_reg->as_register()); 1064 } else { 1065 // int to int moves 1066 __ z_lgr(to_reg->as_register(), from_reg->as_register()); 1067 } 1068 } else { 1069 ShouldNotReachHere(); 1070 } 1071 if (is_reference_type(to_reg->type())) { 1072 __ verify_oop(to_reg->as_register(), FILE_AND_LINE); 1073 } 1074 } 1075 1076 void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type, 1077 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, 1078 bool wide) { 1079 assert(type != T_METADATA, "store of metadata ptr not supported"); 1080 LIR_Address* addr = dest_opr->as_address_ptr(); 1081 1082 Register dest = addr->base()->as_pointer_register(); 1083 Register disp_reg = Z_R0; 1084 int disp_value = addr->disp(); 1085 bool needs_patching = (patch_code != lir_patch_none); 1086 1087 if (addr->base()->is_oop_register()) { 1088 __ verify_oop(dest, FILE_AND_LINE); 1089 } 1090 1091 PatchingStub* patch = nullptr; 1092 if (needs_patching) { 1093 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1094 assert(!from->is_double_cpu() || 1095 patch_code == lir_patch_none || 1096 patch_code == lir_patch_normal, "patching doesn't match register"); 1097 } 1098 1099 assert(!needs_patching || (!Immediate::is_simm20(disp_value) && addr->index()->is_illegal()), "assumption"); 1100 if (addr->index()->is_illegal()) { 1101 if (!Immediate::is_simm20(disp_value)) { 1102 if (needs_patching) { 1103 __ load_const(Z_R1_scratch, (intptr_t)0); 1104 } else { 1105 __ load_const_optimized(Z_R1_scratch, disp_value); 1106 } 1107 disp_reg = Z_R1_scratch; 1108 disp_value = 0; 1109 } 1110 } else { 1111 if (!Immediate::is_simm20(disp_value)) { 1112 __ load_const_optimized(Z_R1_scratch, disp_value); 1113 __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register()); 1114 disp_reg = Z_R1_scratch; 1115 disp_value = 0; 1116 } 1117 disp_reg = addr->index()->as_pointer_register(); 1118 } 1119 1120 assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up"); 1121 1122 if (is_reference_type(type)) { 1123 __ verify_oop(from->as_register(), FILE_AND_LINE); 1124 } 1125 1126 bool short_disp = Immediate::is_uimm12(disp_value); 1127 1128 // Remember the offset of the store. The patching_epilog must be done 1129 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get 1130 // entered in increasing order. 1131 int offset = code_offset(); 1132 switch (type) { 1133 case T_BOOLEAN: // fall through 1134 case T_BYTE : 1135 if (short_disp) { 1136 __ z_stc(from->as_register(), disp_value, disp_reg, dest); 1137 } else { 1138 __ z_stcy(from->as_register(), disp_value, disp_reg, dest); 1139 } 1140 break; 1141 case T_CHAR : // fall through 1142 case T_SHORT : 1143 if (short_disp) { 1144 __ z_sth(from->as_register(), disp_value, disp_reg, dest); 1145 } else { 1146 __ z_sthy(from->as_register(), disp_value, disp_reg, dest); 1147 } 1148 break; 1149 case T_INT : 1150 if (short_disp) { 1151 __ z_st(from->as_register(), disp_value, disp_reg, dest); 1152 } else { 1153 __ z_sty(from->as_register(), disp_value, disp_reg, dest); 1154 } 1155 break; 1156 case T_LONG : __ z_stg(from->as_register_lo(), disp_value, disp_reg, dest); break; 1157 case T_ADDRESS: __ z_stg(from->as_register(), disp_value, disp_reg, dest); break; 1158 break; 1159 case T_ARRAY : // fall through 1160 case T_OBJECT: 1161 { 1162 if (UseCompressedOops && !wide) { 1163 Register compressed_src = Z_R14; 1164 __ oop_encoder(compressed_src, from->as_register(), true, (disp_reg != Z_R1) ? 
Z_R1 : Z_R0, -1, true); 1165 offset = code_offset(); 1166 if (short_disp) { 1167 __ z_st(compressed_src, disp_value, disp_reg, dest); 1168 } else { 1169 __ z_sty(compressed_src, disp_value, disp_reg, dest); 1170 } 1171 } else { 1172 __ z_stg(from->as_register(), disp_value, disp_reg, dest); 1173 } 1174 break; 1175 } 1176 case T_FLOAT : 1177 if (short_disp) { 1178 __ z_ste(from->as_float_reg(), disp_value, disp_reg, dest); 1179 } else { 1180 __ z_stey(from->as_float_reg(), disp_value, disp_reg, dest); 1181 } 1182 break; 1183 case T_DOUBLE: 1184 if (short_disp) { 1185 __ z_std(from->as_double_reg(), disp_value, disp_reg, dest); 1186 } else { 1187 __ z_stdy(from->as_double_reg(), disp_value, disp_reg, dest); 1188 } 1189 break; 1190 default: ShouldNotReachHere(); 1191 } 1192 1193 if (patch != nullptr) { 1194 patching_epilog(patch, patch_code, dest, info); 1195 } 1196 1197 if (info != nullptr) add_debug_info_for_null_check(offset, info); 1198 } 1199 1200 1201 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) { 1202 assert(result->is_illegal() || 1203 (result->is_single_cpu() && result->as_register() == Z_R2) || 1204 (result->is_double_cpu() && result->as_register_lo() == Z_R2) || 1205 (result->is_single_fpu() && result->as_float_reg() == Z_F0) || 1206 (result->is_double_fpu() && result->as_double_reg() == Z_F0), "convention"); 1207 1208 __ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::polling_page_offset())); 1209 1210 // Pop the frame before the safepoint code. 1211 __ pop_frame_restore_retPC(initial_frame_size_in_bytes()); 1212 1213 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { 1214 __ reserved_stack_check(Z_R14); 1215 } 1216 1217 // We need to mark the code position where the load from the safepoint 1218 // polling page was emitted as relocInfo::poll_return_type here. 1219 __ relocate(relocInfo::poll_return_type); 1220 __ load_from_polling_page(Z_R1_scratch); 1221 1222 __ z_br(Z_R14); // Return to caller. 1223 } 1224 1225 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 1226 const Register poll_addr = tmp->as_register_lo(); 1227 __ z_lg(poll_addr, Address(Z_thread, JavaThread::polling_page_offset())); 1228 guarantee(info != nullptr, "Shouldn't be null"); 1229 add_debug_info_for_branch(info); 1230 int offset = __ offset(); 1231 __ relocate(relocInfo::poll_type); 1232 __ load_from_polling_page(poll_addr); 1233 return offset; 1234 } 1235 1236 void LIR_Assembler::emit_static_call_stub() { 1237 1238 // Stub is fixed up when the corresponding call is converted from calling 1239 // compiled code to calling interpreted code. 1240 1241 address call_pc = __ pc(); 1242 address stub = __ start_a_stub(call_stub_size()); 1243 if (stub == nullptr) { 1244 bailout("static call stub overflow"); 1245 return; 1246 } 1247 1248 int start = __ offset(); 1249 1250 __ relocate(static_stub_Relocation::spec(call_pc)); 1251 1252 // See also Matcher::interpreter_method_reg(). 1253 AddressLiteral meta = __ allocate_metadata_address(nullptr); 1254 bool success = __ load_const_from_toc(Z_method, meta); 1255 1256 __ set_inst_mark(); 1257 AddressLiteral a((address)-1); 1258 success = success && __ load_const_from_toc(Z_R1, a); 1259 if (!success) { 1260 bailout("const section overflow"); 1261 return; 1262 } 1263 1264 __ z_br(Z_R1); 1265 assert(__ offset() - start <= call_stub_size(), "stub too big"); 1266 __ end_a_stub(); // Update current stubs pointer and restore insts_end. 
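// Note (elaborating the comment at the top of this function): when the call site is
// resolved, the runtime patches the metadata TOC slot with the callee Method* and
// the branch-target TOC slot with the appropriate entry point, so subsequent calls
// branch there directly.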
1267 } 1268 1269 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 1270 bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual; 1271 if (opr1->is_single_cpu()) { 1272 Register reg1 = opr1->as_register(); 1273 if (opr2->is_single_cpu()) { 1274 // cpu register - cpu register 1275 if (is_reference_type(opr1->type())) { 1276 __ z_clgr(reg1, opr2->as_register()); 1277 } else { 1278 assert(!is_reference_type(opr2->type()), "cmp int, oop?"); 1279 if (unsigned_comp) { 1280 __ z_clr(reg1, opr2->as_register()); 1281 } else { 1282 __ z_cr(reg1, opr2->as_register()); 1283 } 1284 } 1285 } else if (opr2->is_stack()) { 1286 // cpu register - stack 1287 if (is_reference_type(opr1->type())) { 1288 __ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 1289 } else { 1290 if (unsigned_comp) { 1291 __ z_cly(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 1292 } else { 1293 __ z_cy(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 1294 } 1295 } 1296 } else if (opr2->is_constant()) { 1297 // cpu register - constant 1298 LIR_Const* c = opr2->as_constant_ptr(); 1299 if (c->type() == T_INT) { 1300 if (unsigned_comp) { 1301 __ z_clfi(reg1, c->as_jint()); 1302 } else { 1303 __ z_cfi(reg1, c->as_jint()); 1304 } 1305 } else if (c->type() == T_METADATA) { 1306 // We only need, for now, comparison with null for metadata. 1307 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops"); 1308 Metadata* m = c->as_metadata(); 1309 if (m == nullptr) { 1310 __ z_cghi(reg1, 0); 1311 } else { 1312 ShouldNotReachHere(); 1313 } 1314 } else if (is_reference_type(c->type())) { 1315 // In 64bit oops are single register. 1316 jobject o = c->as_jobject(); 1317 if (o == nullptr) { 1318 __ z_ltgr(reg1, reg1); 1319 } else { 1320 jobject2reg(o, Z_R1_scratch); 1321 __ z_cgr(reg1, Z_R1_scratch); 1322 } 1323 } else { 1324 fatal("unexpected type: %s", basictype_to_str(c->type())); 1325 } 1326 // cpu register - address 1327 } else if (opr2->is_address()) { 1328 if (op->info() != nullptr) { 1329 add_debug_info_for_null_check_here(op->info()); 1330 } 1331 if (unsigned_comp) { 1332 __ z_cly(reg1, as_Address(opr2->as_address_ptr())); 1333 } else { 1334 __ z_cy(reg1, as_Address(opr2->as_address_ptr())); 1335 } 1336 } else { 1337 ShouldNotReachHere(); 1338 } 1339 1340 } else if (opr1->is_double_cpu()) { 1341 assert(!unsigned_comp, "unexpected"); 1342 Register xlo = opr1->as_register_lo(); 1343 Register xhi = opr1->as_register_hi(); 1344 if (opr2->is_double_cpu()) { 1345 __ z_cgr(xlo, opr2->as_register_lo()); 1346 } else if (opr2->is_constant()) { 1347 // cpu register - constant 0 1348 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); 1349 __ z_ltgr(xlo, xlo); 1350 } else { 1351 ShouldNotReachHere(); 1352 } 1353 1354 } else if (opr1->is_single_fpu()) { 1355 if (opr2->is_single_fpu()) { 1356 __ z_cebr(opr1->as_float_reg(), opr2->as_float_reg()); 1357 } else { 1358 // stack slot 1359 Address addr = frame_map()->address_for_slot(opr2->single_stack_ix()); 1360 if (Immediate::is_uimm12(addr.disp())) { 1361 __ z_ceb(opr1->as_float_reg(), addr); 1362 } else { 1363 __ z_ley(Z_fscratch_1, addr); 1364 __ z_cebr(opr1->as_float_reg(), Z_fscratch_1); 1365 } 1366 } 1367 } else if (opr1->is_double_fpu()) { 1368 if (opr2->is_double_fpu()) { 1369 __ z_cdbr(opr1->as_double_reg(), opr2->as_double_reg()); 1370 } else { 1371 // stack slot 1372 Address addr = frame_map()->address_for_slot(opr2->double_stack_ix()); 1373 if 
(Immediate::is_uimm12(addr.disp())) { 1374 __ z_cdb(opr1->as_double_reg(), addr); 1375 } else { 1376 __ z_ldy(Z_fscratch_1, addr); 1377 __ z_cdbr(opr1->as_double_reg(), Z_fscratch_1); 1378 } 1379 } 1380 } else { 1381 ShouldNotReachHere(); 1382 } 1383 } 1384 1385 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { 1386 Label done; 1387 Register dreg = dst->as_register(); 1388 1389 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 1390 assert((left->is_single_fpu() && right->is_single_fpu()) || 1391 (left->is_double_fpu() && right->is_double_fpu()), "unexpected operand types"); 1392 bool is_single = left->is_single_fpu(); 1393 bool is_unordered_less = (code == lir_ucmp_fd2i); 1394 FloatRegister lreg = is_single ? left->as_float_reg() : left->as_double_reg(); 1395 FloatRegister rreg = is_single ? right->as_float_reg() : right->as_double_reg(); 1396 if (is_single) { 1397 __ z_cebr(lreg, rreg); 1398 } else { 1399 __ z_cdbr(lreg, rreg); 1400 } 1401 if (VM_Version::has_LoadStoreConditional()) { 1402 Register one = Z_R0_scratch; 1403 Register minus_one = Z_R1_scratch; 1404 __ z_lghi(minus_one, -1); 1405 __ z_lghi(one, 1); 1406 __ z_lghi(dreg, 0); 1407 __ z_locgr(dreg, one, is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered); 1408 __ z_locgr(dreg, minus_one, is_unordered_less ? Assembler::bcondLowOrNotOrdered : Assembler::bcondLow); 1409 } else { 1410 __ clear_reg(dreg, true, false); 1411 __ z_bre(done); // if (left == right) dst = 0 1412 1413 // if (left > right || ((code ~= cmpg) && (left <> right)) dst := 1 1414 __ z_lhi(dreg, 1); 1415 __ z_brc(is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered, done); 1416 1417 // if (left < right || ((code ~= cmpl) && (left <> right)) dst := -1 1418 __ z_lhi(dreg, -1); 1419 } 1420 } else { 1421 assert(code == lir_cmp_l2i, "check"); 1422 if (VM_Version::has_LoadStoreConditional()) { 1423 Register one = Z_R0_scratch; 1424 Register minus_one = Z_R1_scratch; 1425 __ z_cgr(left->as_register_lo(), right->as_register_lo()); 1426 __ z_lghi(minus_one, -1); 1427 __ z_lghi(one, 1); 1428 __ z_lghi(dreg, 0); 1429 __ z_locgr(dreg, one, Assembler::bcondHigh); 1430 __ z_locgr(dreg, minus_one, Assembler::bcondLow); 1431 } else { 1432 __ z_cgr(left->as_register_lo(), right->as_register_lo()); 1433 __ z_lghi(dreg, 0); // eq value 1434 __ z_bre(done); 1435 __ z_lghi(dreg, 1); // gt value 1436 __ z_brh(done); 1437 __ z_lghi(dreg, -1); // lt value 1438 } 1439 } 1440 __ bind(done); 1441 } 1442 1443 // result = condition ? 
opr1 : opr2 1444 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type, 1445 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) { 1446 assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on s390"); 1447 1448 Assembler::branch_condition acond = Assembler::bcondEqual, ncond = Assembler::bcondNotEqual; 1449 switch (condition) { 1450 case lir_cond_equal: acond = Assembler::bcondEqual; ncond = Assembler::bcondNotEqual; break; 1451 case lir_cond_notEqual: acond = Assembler::bcondNotEqual; ncond = Assembler::bcondEqual; break; 1452 case lir_cond_less: acond = Assembler::bcondLow; ncond = Assembler::bcondNotLow; break; 1453 case lir_cond_lessEqual: acond = Assembler::bcondNotHigh; ncond = Assembler::bcondHigh; break; 1454 case lir_cond_greaterEqual: acond = Assembler::bcondNotLow; ncond = Assembler::bcondLow; break; 1455 case lir_cond_greater: acond = Assembler::bcondHigh; ncond = Assembler::bcondNotHigh; break; 1456 case lir_cond_belowEqual: acond = Assembler::bcondNotHigh; ncond = Assembler::bcondHigh; break; 1457 case lir_cond_aboveEqual: acond = Assembler::bcondNotLow; ncond = Assembler::bcondLow; break; 1458 default: ShouldNotReachHere(); 1459 } 1460 1461 if (opr1->is_cpu_register()) { 1462 reg2reg(opr1, result); 1463 } else if (opr1->is_stack()) { 1464 stack2reg(opr1, result, result->type()); 1465 } else if (opr1->is_constant()) { 1466 const2reg(opr1, result, lir_patch_none, nullptr); 1467 } else { 1468 ShouldNotReachHere(); 1469 } 1470 1471 if (VM_Version::has_LoadStoreConditional() && !opr2->is_constant()) { 1472 // Optimized version that does not require a branch. 1473 if (opr2->is_single_cpu()) { 1474 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); 1475 __ z_locgr(result->as_register(), opr2->as_register(), ncond); 1476 } else if (opr2->is_double_cpu()) { 1477 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 1478 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 1479 __ z_locgr(result->as_register_lo(), opr2->as_register_lo(), ncond); 1480 } else if (opr2->is_single_stack()) { 1481 __ z_loc(result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()), ncond); 1482 } else if (opr2->is_double_stack()) { 1483 __ z_locg(result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix()), ncond); 1484 } else { 1485 ShouldNotReachHere(); 1486 } 1487 } else { 1488 Label skip; 1489 __ z_brc(acond, skip); 1490 if (opr2->is_cpu_register()) { 1491 reg2reg(opr2, result); 1492 } else if (opr2->is_stack()) { 1493 stack2reg(opr2, result, result->type()); 1494 } else if (opr2->is_constant()) { 1495 const2reg(opr2, result, lir_patch_none, nullptr); 1496 } else { 1497 ShouldNotReachHere(); 1498 } 1499 __ bind(skip); 1500 } 1501 } 1502 1503 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, 1504 CodeEmitInfo* info, bool pop_fpu_stack) { 1505 assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 1506 1507 if (left->is_single_cpu()) { 1508 assert(left == dest, "left and dest must be equal"); 1509 Register lreg = left->as_register(); 1510 1511 if (right->is_single_cpu()) { 1512 // cpu register - cpu register 1513 Register rreg = right->as_register(); 1514 switch (code) 
{ 1515 case lir_add: __ z_ar (lreg, rreg); break; 1516 case lir_sub: __ z_sr (lreg, rreg); break; 1517 case lir_mul: __ z_msr(lreg, rreg); break; 1518 default: ShouldNotReachHere(); 1519 } 1520 1521 } else if (right->is_stack()) { 1522 // cpu register - stack 1523 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1524 switch (code) { 1525 case lir_add: __ z_ay(lreg, raddr); break; 1526 case lir_sub: __ z_sy(lreg, raddr); break; 1527 default: ShouldNotReachHere(); 1528 } 1529 1530 } else if (right->is_constant()) { 1531 // cpu register - constant 1532 jint c = right->as_constant_ptr()->as_jint(); 1533 switch (code) { 1534 case lir_add: 1535 __ add2reg_32(lreg, c); 1536 break; 1537 case lir_sub: 1538 __ add2reg_32(lreg, java_negate(c)); 1539 break; 1540 case lir_mul: __ z_msfi(lreg, c); break; 1541 default: ShouldNotReachHere(); 1542 } 1543 1544 } else { 1545 ShouldNotReachHere(); 1546 } 1547 1548 } else if (left->is_double_cpu()) { 1549 assert(left == dest, "left and dest must be equal"); 1550 Register lreg_lo = left->as_register_lo(); 1551 Register lreg_hi = left->as_register_hi(); 1552 1553 if (right->is_double_cpu()) { 1554 // cpu register - cpu register 1555 Register rreg_lo = right->as_register_lo(); 1556 Register rreg_hi = right->as_register_hi(); 1557 assert_different_registers(lreg_lo, rreg_lo); 1558 switch (code) { 1559 case lir_add: 1560 __ z_agr(lreg_lo, rreg_lo); 1561 break; 1562 case lir_sub: 1563 __ z_sgr(lreg_lo, rreg_lo); 1564 break; 1565 case lir_mul: 1566 __ z_msgr(lreg_lo, rreg_lo); 1567 break; 1568 default: 1569 ShouldNotReachHere(); 1570 } 1571 1572 } else if (right->is_constant()) { 1573 // cpu register - constant 1574 jlong c = right->as_constant_ptr()->as_jlong_bits(); 1575 switch (code) { 1576 case lir_add: __ z_agfi(lreg_lo, c); break; 1577 case lir_sub: 1578 if (c != min_jint) { 1579 __ z_agfi(lreg_lo, -c); 1580 } else { 1581 // -min_jint cannot be represented as simm32 in z_agfi 1582 // min_jint sign extended: 0xffffffff80000000 1583 // -min_jint as 64 bit integer: 0x0000000080000000 1584 // 0x80000000 can be represented as uimm32 in z_algfi 1585 // lreg_lo := lreg_lo + -min_jint == lreg_lo + 0x80000000 1586 __ z_algfi(lreg_lo, UCONST64(0x80000000)); 1587 } 1588 break; 1589 case lir_mul: __ z_msgfi(lreg_lo, c); break; 1590 default: 1591 ShouldNotReachHere(); 1592 } 1593 1594 } else { 1595 ShouldNotReachHere(); 1596 } 1597 1598 } else if (left->is_single_fpu()) { 1599 assert(left == dest, "left and dest must be equal"); 1600 FloatRegister lreg = left->as_float_reg(); 1601 FloatRegister rreg = right->is_single_fpu() ? 
right->as_float_reg() : fnoreg; 1602 Address raddr; 1603 1604 if (rreg == fnoreg) { 1605 assert(right->is_single_stack(), "constants should be loaded into register"); 1606 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1607 if (!Immediate::is_uimm12(raddr.disp())) { 1608 __ mem2freg_opt(rreg = Z_fscratch_1, raddr, false); 1609 } 1610 } 1611 1612 if (rreg != fnoreg) { 1613 switch (code) { 1614 case lir_add: __ z_aebr(lreg, rreg); break; 1615 case lir_sub: __ z_sebr(lreg, rreg); break; 1616 case lir_mul: __ z_meebr(lreg, rreg); break; 1617 case lir_div: __ z_debr(lreg, rreg); break; 1618 default: ShouldNotReachHere(); 1619 } 1620 } else { 1621 switch (code) { 1622 case lir_add: __ z_aeb(lreg, raddr); break; 1623 case lir_sub: __ z_seb(lreg, raddr); break; 1624 case lir_mul: __ z_meeb(lreg, raddr); break; 1625 case lir_div: __ z_deb(lreg, raddr); break; 1626 default: ShouldNotReachHere(); 1627 } 1628 } 1629 } else if (left->is_double_fpu()) { 1630 assert(left == dest, "left and dest must be equal"); 1631 FloatRegister lreg = left->as_double_reg(); 1632 FloatRegister rreg = right->is_double_fpu() ? right->as_double_reg() : fnoreg; 1633 Address raddr; 1634 1635 if (rreg == fnoreg) { 1636 assert(right->is_double_stack(), "constants should be loaded into register"); 1637 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 1638 if (!Immediate::is_uimm12(raddr.disp())) { 1639 __ mem2freg_opt(rreg = Z_fscratch_1, raddr, true); 1640 } 1641 } 1642 1643 if (rreg != fnoreg) { 1644 switch (code) { 1645 case lir_add: __ z_adbr(lreg, rreg); break; 1646 case lir_sub: __ z_sdbr(lreg, rreg); break; 1647 case lir_mul: __ z_mdbr(lreg, rreg); break; 1648 case lir_div: __ z_ddbr(lreg, rreg); break; 1649 default: ShouldNotReachHere(); 1650 } 1651 } else { 1652 switch (code) { 1653 case lir_add: __ z_adb(lreg, raddr); break; 1654 case lir_sub: __ z_sdb(lreg, raddr); break; 1655 case lir_mul: __ z_mdb(lreg, raddr); break; 1656 case lir_div: __ z_ddb(lreg, raddr); break; 1657 default: ShouldNotReachHere(); 1658 } 1659 } 1660 } else if (left->is_address()) { 1661 assert(left == dest, "left and dest must be equal"); 1662 assert(code == lir_add, "unsupported operation"); 1663 assert(right->is_constant(), "unsupported operand"); 1664 jint c = right->as_constant_ptr()->as_jint(); 1665 LIR_Address* lir_addr = left->as_address_ptr(); 1666 Address addr = as_Address(lir_addr); 1667 switch (lir_addr->type()) { 1668 case T_INT: 1669 __ add2mem_32(addr, c, Z_R1_scratch); 1670 break; 1671 case T_LONG: 1672 __ add2mem_64(addr, c, Z_R1_scratch); 1673 break; 1674 default: 1675 ShouldNotReachHere(); 1676 } 1677 } else { 1678 ShouldNotReachHere(); 1679 } 1680 } 1681 1682 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1683 switch (code) { 1684 case lir_sqrt: { 1685 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt"); 1686 FloatRegister src_reg = value->as_double_reg(); 1687 FloatRegister dst_reg = dest->as_double_reg(); 1688 __ z_sqdbr(dst_reg, src_reg); 1689 break; 1690 } 1691 case lir_abs: { 1692 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs"); 1693 FloatRegister src_reg = value->as_double_reg(); 1694 FloatRegister dst_reg = dest->as_double_reg(); 1695 __ z_lpdbr(dst_reg, src_reg); 1696 break; 1697 } 1698 default: { 1699 ShouldNotReachHere(); 1700 break; 1701 } 1702 } 1703 } 1704 1705 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 1706 if 
(left->is_single_cpu()) { 1707 Register reg = left->as_register(); 1708 if (right->is_constant()) { 1709 int val = right->as_constant_ptr()->as_jint(); 1710 switch (code) { 1711 case lir_logic_and: __ z_nilf(reg, val); break; 1712 case lir_logic_or: __ z_oilf(reg, val); break; 1713 case lir_logic_xor: __ z_xilf(reg, val); break; 1714 default: ShouldNotReachHere(); 1715 } 1716 } else if (right->is_stack()) { 1717 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1718 switch (code) { 1719 case lir_logic_and: __ z_ny(reg, raddr); break; 1720 case lir_logic_or: __ z_oy(reg, raddr); break; 1721 case lir_logic_xor: __ z_xy(reg, raddr); break; 1722 default: ShouldNotReachHere(); 1723 } 1724 } else { 1725 Register rright = right->as_register(); 1726 switch (code) { 1727 case lir_logic_and: __ z_nr(reg, rright); break; 1728 case lir_logic_or : __ z_or(reg, rright); break; 1729 case lir_logic_xor: __ z_xr(reg, rright); break; 1730 default: ShouldNotReachHere(); 1731 } 1732 } 1733 move_regs(reg, dst->as_register()); 1734 } else { 1735 Register l_lo = left->as_register_lo(); 1736 if (right->is_constant()) { 1737 __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong()); 1738 switch (code) { 1739 case lir_logic_and: 1740 __ z_ngr(l_lo, Z_R1_scratch); 1741 break; 1742 case lir_logic_or: 1743 __ z_ogr(l_lo, Z_R1_scratch); 1744 break; 1745 case lir_logic_xor: 1746 __ z_xgr(l_lo, Z_R1_scratch); 1747 break; 1748 default: ShouldNotReachHere(); 1749 } 1750 } else { 1751 Register r_lo; 1752 if (is_reference_type(right->type())) { 1753 r_lo = right->as_register(); 1754 } else { 1755 r_lo = right->as_register_lo(); 1756 } 1757 switch (code) { 1758 case lir_logic_and: 1759 __ z_ngr(l_lo, r_lo); 1760 break; 1761 case lir_logic_or: 1762 __ z_ogr(l_lo, r_lo); 1763 break; 1764 case lir_logic_xor: 1765 __ z_xgr(l_lo, r_lo); 1766 break; 1767 default: ShouldNotReachHere(); 1768 } 1769 } 1770 1771 Register dst_lo = dst->as_register_lo(); 1772 1773 move_regs(l_lo, dst_lo); 1774 } 1775 } 1776 1777 // See operand selection in LIRGenerator::do_ArithmeticOp_Int(). 1778 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { 1779 if (left->is_double_cpu()) { 1780 // 64 bit integer case 1781 assert(left->is_double_cpu(), "left must be register"); 1782 assert(right->is_double_cpu() || is_power_of_2(right->as_jlong()), 1783 "right must be register or power of 2 constant"); 1784 assert(result->is_double_cpu(), "result must be register"); 1785 1786 Register lreg = left->as_register_lo(); 1787 Register dreg = result->as_register_lo(); 1788 1789 if (right->is_constant()) { 1790 // Convert division by a power of two into some shifts and logical operations. 1791 Register treg1 = Z_R0_scratch; 1792 Register treg2 = Z_R1_scratch; 1793 jlong divisor = right->as_jlong(); 1794 jlong log_divisor = log2i_exact(right->as_jlong()); 1795 1796 if (divisor == min_jlong) { 1797 // Min_jlong is special. Result is '0' except for min_jlong/min_jlong = 1. 1798 if (dreg == lreg) { 1799 NearLabel done; 1800 __ load_const_optimized(treg2, min_jlong); 1801 __ z_cgr(lreg, treg2); 1802 __ z_lghi(dreg, 0); // Preserves condition code. 
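          // LGHI is a load-type instruction and does not change the condition code,
          // so the CC set by the z_cgr compare above is still valid for the branch below.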
1803 __ z_brne(done); 1804 __ z_lghi(dreg, 1); // min_jlong / min_jlong = 1 1805 __ bind(done); 1806 } else { 1807 assert_different_registers(dreg, lreg); 1808 NearLabel done; 1809 __ z_lghi(dreg, 0); 1810 __ compare64_and_branch(lreg, min_jlong, Assembler::bcondNotEqual, done); 1811 __ z_lghi(dreg, 1); 1812 __ bind(done); 1813 } 1814 return; 1815 } 1816 __ move_reg_if_needed(dreg, T_LONG, lreg, T_LONG); 1817 if (divisor == 2) { 1818 __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0 1819 } else { 1820 __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0 1821 __ and_imm(treg2, divisor - 1, treg1, true); 1822 } 1823 if (code == lir_idiv) { 1824 __ z_agr(dreg, treg2); 1825 __ z_srag(dreg, dreg, log_divisor); 1826 } else { 1827 assert(code == lir_irem, "check"); 1828 __ z_agr(treg2, dreg); 1829 __ and_imm(treg2, ~(divisor - 1), treg1, true); 1830 __ z_sgr(dreg, treg2); 1831 } 1832 return; 1833 } 1834 1835 // Divisor is not a power of 2 constant. 1836 Register rreg = right->as_register_lo(); 1837 Register treg = temp->as_register_lo(); 1838 assert(right->is_double_cpu(), "right must be register"); 1839 assert(lreg == Z_R11, "see ldivInOpr()"); 1840 assert(rreg != lreg, "right register must not be same as left register"); 1841 assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) || 1842 (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see ldivInOpr(), ldivOutOpr(), lremOutOpr()"); 1843 1844 Register R1 = lreg->predecessor(); 1845 Register R2 = rreg; 1846 assert(code != lir_idiv || lreg==dreg, "see code below"); 1847 if (code == lir_idiv) { 1848 __ z_lcgr(lreg, lreg); 1849 } else { 1850 __ clear_reg(dreg, true, false); 1851 } 1852 NearLabel done; 1853 __ compare64_and_branch(R2, -1, Assembler::bcondEqual, done); 1854 if (code == lir_idiv) { 1855 __ z_lcgr(lreg, lreg); // Revert lcgr above. 1856 } 1857 if (ImplicitDiv0Checks) { 1858 // No debug info because the idiv won't trap. 1859 // Add_debug_info_for_div0 would instantiate another DivByZeroStub, 1860 // which is unnecessary, too. 1861 add_debug_info_for_div0(__ offset(), info); 1862 } 1863 __ z_dsgr(R1, R2); 1864 __ bind(done); 1865 return; 1866 } 1867 1868 // 32 bit integer case 1869 1870 assert(left->is_single_cpu(), "left must be register"); 1871 assert(right->is_single_cpu() || is_power_of_2(right->as_jint()), "right must be register or power of 2 constant"); 1872 assert(result->is_single_cpu(), "result must be register"); 1873 1874 Register lreg = left->as_register(); 1875 Register dreg = result->as_register(); 1876 1877 if (right->is_constant()) { 1878 // Convert division by a power of two into some shifts and logical operations. 1879 Register treg1 = Z_R0_scratch; 1880 Register treg2 = Z_R1_scratch; 1881 jlong divisor = right->as_jint(); 1882 jlong log_divisor = log2i_exact(right->as_jint()); 1883 __ move_reg_if_needed(dreg, T_LONG, lreg, T_INT); // sign extend 1884 if (divisor == 2) { 1885 __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0 1886 } else { 1887 __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0 1888 __ and_imm(treg2, divisor - 1, treg1, true); 1889 } 1890 if (code == lir_idiv) { 1891 __ z_agr(dreg, treg2); 1892 __ z_srag(dreg, dreg, log_divisor); 1893 } else { 1894 assert(code == lir_irem, "check"); 1895 __ z_agr(treg2, dreg); 1896 __ and_imm(treg2, ~(divisor - 1), treg1, true); 1897 __ z_sgr(dreg, treg2); 1898 } 1899 return; 1900 } 1901 1902 // Divisor is not a power of 2 constant. 
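  // Sketch of the general divide path, assuming the usual z/Architecture DIVIDE SINGLE
  // semantics: DSGFR names an even/odd register pair; the 64-bit dividend is taken from
  // the odd register (here Z_R11 == lreg, with lreg->predecessor() giving the even half,
  // Z_R10). After the divide, the remainder lands in the even register and the quotient
  // in the odd register, which is why the asserts below pin the Z_R10/Z_R11 pairing
  // chosen by divInOpr()/divOutOpr()/remOutOpr().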
1903 Register rreg = right->as_register();
1904 Register treg = temp->as_register();
1905 assert(right->is_single_cpu(), "right must be register");
1906 assert(lreg == Z_R11, "see divInOpr()");
1907 assert(rreg != lreg, "right register must not be the same as the left register");
1908 assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10)
1909 || (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see divInOpr(), divOutOpr(), remOutOpr()");
1910
1911 Register R1 = lreg->predecessor();
1912 Register R2 = rreg;
1913 __ move_reg_if_needed(lreg, T_LONG, lreg, T_INT); // sign extend
1914 if (ImplicitDiv0Checks) {
1915 // No debug info because the idiv won't trap.
1916 // Add_debug_info_for_div0 would instantiate another DivByZeroStub,
1917 // which is unnecessary, too.
1918 add_debug_info_for_div0(__ offset(), info);
1919 }
1920 __ z_dsgfr(R1, R2);
1921 }
1922
1923 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1924 assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");
1925 assert(exceptionPC->as_register() == Z_EXC_PC, "should match");
1926
1927 // Exception object is not added to oop map by LinearScan
1928 // (LinearScan assumes that no oops are in fixed registers).
1929 info->add_register_oop(exceptionOop);
1930
1931 // Reuse the debug info from the safepoint poll for the throw op itself.
1932 __ get_PC(Z_EXC_PC);
1933 add_call_info(__ offset(), info); // for exception handler
1934 address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? C1StubId::handle_exception_id
1935 : C1StubId::handle_exception_nofpu_id);
1936 emit_call_c(stub);
1937 }
1938
1939 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
1940 assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");
1941
1942 __ branch_optimized(Assembler::bcondAlways, _unwind_handler_entry);
1943 }
1944
1945 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
1946 ciArrayKlass* default_type = op->expected_type();
1947 Register src = op->src()->as_register();
1948 Register dst = op->dst()->as_register();
1949 Register src_pos = op->src_pos()->as_register();
1950 Register dst_pos = op->dst_pos()->as_register();
1951 Register length = op->length()->as_register();
1952 Register tmp = op->tmp()->as_register();
1953
1954 CodeStub* stub = op->stub();
1955 int flags = op->flags();
1956 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
1957 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
1958
1959 // If we don't know anything, just go through the generic arraycopy.
1960 if (default_type == nullptr) {
1961 address copyfunc_addr = StubRoutines::generic_arraycopy();
1962
1963 if (copyfunc_addr == nullptr) {
1964 // Take a slow path for generic arraycopy.
1965 __ branch_optimized(Assembler::bcondAlways, *stub->entry());
1966 __ bind(*stub->continuation());
1967 return;
1968 }
1969
1970 // Save outgoing arguments in callee saved registers (C convention) in case
1971 // a call to System.arraycopy is needed.
1972 Register callee_saved_src = Z_R10;
1973 Register callee_saved_src_pos = Z_R11;
1974 Register callee_saved_dst = Z_R12;
1975 Register callee_saved_dst_pos = Z_R13;
1976 Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.
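    // Per the s390x C calling convention, Z_R6..Z_R13 are preserved across calls,
    // so the values parked here survive the call to the arraycopy stub and can be
    // restored below if we still have to branch to the slow-path stub.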
1977 1978 __ lgr_if_needed(callee_saved_src, src); 1979 __ lgr_if_needed(callee_saved_src_pos, src_pos); 1980 __ lgr_if_needed(callee_saved_dst, dst); 1981 __ lgr_if_needed(callee_saved_dst_pos, dst_pos); 1982 __ lgr_if_needed(callee_saved_length, length); 1983 1984 // C function requires 64 bit values. 1985 __ z_lgfr(src_pos, src_pos); 1986 __ z_lgfr(dst_pos, dst_pos); 1987 __ z_lgfr(length, length); 1988 1989 // Pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint. 1990 1991 // The arguments are in the corresponding registers. 1992 assert(Z_ARG1 == src, "assumption"); 1993 assert(Z_ARG2 == src_pos, "assumption"); 1994 assert(Z_ARG3 == dst, "assumption"); 1995 assert(Z_ARG4 == dst_pos, "assumption"); 1996 assert(Z_ARG5 == length, "assumption"); 1997 #ifndef PRODUCT 1998 if (PrintC1Statistics) { 1999 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt); 2000 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2001 } 2002 #endif 2003 emit_call_c(copyfunc_addr); 2004 CHECK_BAILOUT(); 2005 2006 __ compare32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation()); 2007 2008 __ z_lgr(tmp, Z_RET); 2009 __ z_xilf(tmp, -1); 2010 2011 // Restore values from callee saved registers so they are where the stub 2012 // expects them. 2013 __ lgr_if_needed(src, callee_saved_src); 2014 __ lgr_if_needed(src_pos, callee_saved_src_pos); 2015 __ lgr_if_needed(dst, callee_saved_dst); 2016 __ lgr_if_needed(dst_pos, callee_saved_dst_pos); 2017 __ lgr_if_needed(length, callee_saved_length); 2018 2019 __ z_sr(length, tmp); 2020 __ z_ar(src_pos, tmp); 2021 __ z_ar(dst_pos, tmp); 2022 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 2023 2024 __ bind(*stub->continuation()); 2025 return; 2026 } 2027 2028 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 2029 2030 int elem_size = type2aelembytes(basic_type); 2031 int shift_amount; 2032 2033 switch (elem_size) { 2034 case 1 : 2035 shift_amount = 0; 2036 break; 2037 case 2 : 2038 shift_amount = 1; 2039 break; 2040 case 4 : 2041 shift_amount = 2; 2042 break; 2043 case 8 : 2044 shift_amount = 3; 2045 break; 2046 default: 2047 shift_amount = -1; 2048 ShouldNotReachHere(); 2049 } 2050 2051 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 2052 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 2053 2054 // Length and pos's are all sign extended at this point on 64bit. 2055 2056 // test for null 2057 if (flags & LIR_OpArrayCopy::src_null_check) { 2058 __ compareU64_and_branch(src, (intptr_t)0, Assembler::bcondZero, *stub->entry()); 2059 } 2060 if (flags & LIR_OpArrayCopy::dst_null_check) { 2061 __ compareU64_and_branch(dst, (intptr_t)0, Assembler::bcondZero, *stub->entry()); 2062 } 2063 2064 // Check if negative. 2065 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 2066 __ compare32_and_branch(src_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry()); 2067 } 2068 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 2069 __ compare32_and_branch(dst_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry()); 2070 } 2071 2072 // If the compiler was not able to prove that exact type of the source or the destination 2073 // of the arraycopy is an array type, check at runtime if the source or the destination is 2074 // an instance type. 
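  // The layout helper is negative for array klasses and non-negative for instance
  // klasses (with _lh_neutral_value == 0 as the boundary), so the load-and-test plus
  // "branch if not negative" below routes instance types to the slow-path stub.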
2075 if (flags & LIR_OpArrayCopy::type_check) { 2076 assert(Klass::_lh_neutral_value == 0, "or replace z_lt instructions"); 2077 2078 if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2079 __ load_klass(tmp, dst); 2080 __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2081 __ branch_optimized(Assembler::bcondNotLow, *stub->entry()); 2082 } 2083 2084 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2085 __ load_klass(tmp, src); 2086 __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2087 __ branch_optimized(Assembler::bcondNotLow, *stub->entry()); 2088 } 2089 } 2090 2091 if (flags & LIR_OpArrayCopy::src_range_check) { 2092 __ z_la(tmp, Address(src_pos, length)); 2093 __ z_cl(tmp, src_length_addr); 2094 __ branch_optimized(Assembler::bcondHigh, *stub->entry()); 2095 } 2096 if (flags & LIR_OpArrayCopy::dst_range_check) { 2097 __ z_la(tmp, Address(dst_pos, length)); 2098 __ z_cl(tmp, dst_length_addr); 2099 __ branch_optimized(Assembler::bcondHigh, *stub->entry()); 2100 } 2101 2102 if (flags & LIR_OpArrayCopy::length_positive_check) { 2103 __ z_ltr(length, length); 2104 __ branch_optimized(Assembler::bcondNegative, *stub->entry()); 2105 } 2106 2107 // Stubs require 64 bit values. 2108 __ z_lgfr(src_pos, src_pos); // int -> long 2109 __ z_lgfr(dst_pos, dst_pos); // int -> long 2110 __ z_lgfr(length, length); // int -> long 2111 2112 if (flags & LIR_OpArrayCopy::type_check) { 2113 // We don't know the array types are compatible. 2114 if (basic_type != T_OBJECT) { 2115 // Simple test for basic type arrays. 2116 __ cmp_klasses_from_objects(src, dst, tmp, Z_R1_scratch); 2117 __ branch_optimized(Assembler::bcondNotEqual, *stub->entry()); 2118 } else { 2119 // For object arrays, if src is a sub class of dst then we can 2120 // safely do the copy. 2121 NearLabel cont, slow; 2122 Register src_klass = Z_R1_scratch; 2123 Register dst_klass = Z_R10; 2124 2125 __ load_klass(src_klass, src); 2126 __ load_klass(dst_klass, dst); 2127 2128 __ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, nullptr); 2129 2130 store_parameter(src_klass, 0); // sub 2131 store_parameter(dst_klass, 1); // super 2132 emit_call_c(Runtime1::entry_for (C1StubId::slow_subtype_check_id)); 2133 CHECK_BAILOUT2(cont, slow); 2134 // Sets condition code 0 for match (2 otherwise). 2135 __ branch_optimized(Assembler::bcondEqual, cont); 2136 2137 __ bind(slow); 2138 2139 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 2140 if (copyfunc_addr != nullptr) { // use stub if available 2141 // Src is not a sub class of dst so we have to do a 2142 // per-element check. 2143 2144 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2145 if ((flags & mask) != mask) { 2146 // Check that at least both of them object arrays. 2147 assert(flags & mask, "one of the two should be known to be an object array"); 2148 2149 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2150 __ load_klass(tmp, src); 2151 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2152 __ load_klass(tmp, dst); 2153 } 2154 Address klass_lh_addr(tmp, Klass::layout_helper_offset()); 2155 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2156 __ load_const_optimized(Z_R1_scratch, objArray_lh); 2157 __ z_c(Z_R1_scratch, klass_lh_addr); 2158 __ branch_optimized(Assembler::bcondNotEqual, *stub->entry()); 2159 } 2160 2161 // Save outgoing arguments in callee saved registers (C convention) in case 2162 // a call to System.arraycopy is needed. 
2163 Register callee_saved_src = Z_R10; 2164 Register callee_saved_src_pos = Z_R11; 2165 Register callee_saved_dst = Z_R12; 2166 Register callee_saved_dst_pos = Z_R13; 2167 Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved. 2168 2169 __ lgr_if_needed(callee_saved_src, src); 2170 __ lgr_if_needed(callee_saved_src_pos, src_pos); 2171 __ lgr_if_needed(callee_saved_dst, dst); 2172 __ lgr_if_needed(callee_saved_dst_pos, dst_pos); 2173 __ lgr_if_needed(callee_saved_length, length); 2174 2175 __ z_llgfr(length, length); // Higher 32bits must be null. 2176 2177 __ z_sllg(Z_ARG1, src_pos, shift_amount); // index -> byte offset 2178 __ z_sllg(Z_ARG2, dst_pos, shift_amount); // index -> byte offset 2179 2180 __ z_la(Z_ARG1, Address(src, Z_ARG1, arrayOopDesc::base_offset_in_bytes(basic_type))); 2181 assert_different_registers(Z_ARG1, dst, dst_pos, length); 2182 __ z_la(Z_ARG2, Address(dst, Z_ARG2, arrayOopDesc::base_offset_in_bytes(basic_type))); 2183 assert_different_registers(Z_ARG2, dst, length); 2184 2185 __ z_lgr(Z_ARG3, length); 2186 assert_different_registers(Z_ARG3, dst); 2187 2188 __ load_klass(Z_ARG5, dst); 2189 __ z_lg(Z_ARG5, Address(Z_ARG5, ObjArrayKlass::element_klass_offset())); 2190 __ z_lg(Z_ARG4, Address(Z_ARG5, Klass::super_check_offset_offset())); 2191 emit_call_c(copyfunc_addr); 2192 CHECK_BAILOUT2(cont, slow); 2193 2194 #ifndef PRODUCT 2195 if (PrintC1Statistics) { 2196 NearLabel failed; 2197 __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondNotEqual, failed); 2198 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_cnt); 2199 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2200 __ bind(failed); 2201 } 2202 #endif 2203 2204 __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation()); 2205 2206 #ifndef PRODUCT 2207 if (PrintC1Statistics) { 2208 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_attempt_cnt); 2209 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2210 } 2211 #endif 2212 2213 __ z_lgr(tmp, Z_RET); 2214 __ z_xilf(tmp, -1); 2215 2216 // Restore previously spilled arguments 2217 __ lgr_if_needed(src, callee_saved_src); 2218 __ lgr_if_needed(src_pos, callee_saved_src_pos); 2219 __ lgr_if_needed(dst, callee_saved_dst); 2220 __ lgr_if_needed(dst_pos, callee_saved_dst_pos); 2221 __ lgr_if_needed(length, callee_saved_length); 2222 2223 __ z_sr(length, tmp); 2224 __ z_ar(src_pos, tmp); 2225 __ z_ar(dst_pos, tmp); 2226 } 2227 2228 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 2229 2230 __ bind(cont); 2231 } 2232 } 2233 2234 #ifdef ASSERT 2235 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 2236 // Sanity check the known type with the incoming class. For the 2237 // primitive case the types must match exactly with src.klass and 2238 // dst.klass each exactly matching the default type. For the 2239 // object array case, if no type check is needed then either the 2240 // dst type is exactly the expected type and the src type is a 2241 // subtype which we can't check or src is the same array as dst 2242 // but not necessarily exactly of type default_type. 
2243 NearLabel known_ok, halt; 2244 metadata2reg(default_type->constant_encoding(), tmp); 2245 if (UseCompressedClassPointers) { 2246 __ encode_klass_not_null(tmp); 2247 } 2248 2249 if (basic_type != T_OBJECT) { 2250 __ cmp_klass(tmp, dst, Z_R1_scratch); 2251 __ branch_optimized(Assembler::bcondNotEqual, halt); 2252 2253 __ cmp_klass(tmp, src, Z_R1_scratch); 2254 __ branch_optimized(Assembler::bcondEqual, known_ok); 2255 } else { 2256 __ cmp_klass(tmp, dst, Z_R1_scratch); 2257 __ branch_optimized(Assembler::bcondEqual, known_ok); 2258 __ compareU64_and_branch(src, dst, Assembler::bcondEqual, known_ok); 2259 } 2260 __ bind(halt); 2261 __ stop("incorrect type information in arraycopy"); 2262 __ bind(known_ok); 2263 } 2264 #endif 2265 2266 #ifndef PRODUCT 2267 if (PrintC1Statistics) { 2268 __ load_const_optimized(Z_R1_scratch, Runtime1::arraycopy_count_address(basic_type)); 2269 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2270 } 2271 #endif 2272 2273 __ z_sllg(tmp, src_pos, shift_amount); // index -> byte offset 2274 __ z_sllg(Z_R1_scratch, dst_pos, shift_amount); // index -> byte offset 2275 2276 assert_different_registers(Z_ARG1, dst, dst_pos, length); 2277 __ z_la(Z_ARG1, Address(src, tmp, arrayOopDesc::base_offset_in_bytes(basic_type))); 2278 assert_different_registers(Z_ARG2, length); 2279 __ z_la(Z_ARG2, Address(dst, Z_R1_scratch, arrayOopDesc::base_offset_in_bytes(basic_type))); 2280 __ lgr_if_needed(Z_ARG3, length); 2281 2282 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2283 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2284 const char *name; 2285 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2286 __ call_VM_leaf(entry); 2287 2288 if (stub != nullptr) { 2289 __ bind(*stub->continuation()); 2290 } 2291 } 2292 2293 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2294 if (dest->is_single_cpu()) { 2295 if (left->type() == T_OBJECT) { 2296 switch (code) { 2297 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2298 case lir_shr: __ z_srag (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2299 case lir_ushr: __ z_srlg (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2300 default: ShouldNotReachHere(); 2301 } 2302 } else { 2303 assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts"); 2304 Register masked_count = Z_R1_scratch; 2305 __ z_lr(masked_count, count->as_register()); 2306 __ z_nill(masked_count, 31); 2307 switch (code) { 2308 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), 0, masked_count); break; 2309 case lir_shr: __ z_sra (dest->as_register(), 0, masked_count); break; 2310 case lir_ushr: __ z_srl (dest->as_register(), 0, masked_count); break; 2311 default: ShouldNotReachHere(); 2312 } 2313 } 2314 } else { 2315 switch (code) { 2316 case lir_shl: __ z_sllg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2317 case lir_shr: __ z_srag (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2318 case lir_ushr: __ z_srlg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2319 default: ShouldNotReachHere(); 2320 } 2321 } 2322 } 2323 2324 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2325 if (left->type() == T_OBJECT) { 2326 count = count & 63; // 
Shouldn't shift by more than sizeof(intptr_t). 2327 Register l = left->as_register(); 2328 Register d = dest->as_register_lo(); 2329 switch (code) { 2330 case lir_shl: __ z_sllg (d, l, count); break; 2331 case lir_shr: __ z_srag (d, l, count); break; 2332 case lir_ushr: __ z_srlg (d, l, count); break; 2333 default: ShouldNotReachHere(); 2334 } 2335 return; 2336 } 2337 if (dest->is_single_cpu()) { 2338 assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts"); 2339 count = count & 0x1F; // Java spec 2340 switch (code) { 2341 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), count); break; 2342 case lir_shr: __ z_sra (dest->as_register(), count); break; 2343 case lir_ushr: __ z_srl (dest->as_register(), count); break; 2344 default: ShouldNotReachHere(); 2345 } 2346 } else if (dest->is_double_cpu()) { 2347 count = count & 63; // Java spec 2348 Register l = left->as_pointer_register(); 2349 Register d = dest->as_pointer_register(); 2350 switch (code) { 2351 case lir_shl: __ z_sllg (d, l, count); break; 2352 case lir_shr: __ z_srag (d, l, count); break; 2353 case lir_ushr: __ z_srlg (d, l, count); break; 2354 default: ShouldNotReachHere(); 2355 } 2356 } else { 2357 ShouldNotReachHere(); 2358 } 2359 } 2360 2361 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 2362 if (op->init_check()) { 2363 // Make sure klass is initialized & doesn't have finalizer. 2364 // init_state needs acquire, but S390 is TSO, and so we are already good. 2365 const int state_offset = in_bytes(InstanceKlass::init_state_offset()); 2366 Register iklass = op->klass()->as_register(); 2367 add_debug_info_for_null_check_here(op->stub()->info()); 2368 if (Immediate::is_uimm12(state_offset)) { 2369 __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized); 2370 } else { 2371 __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized); 2372 } 2373 __ branch_optimized(Assembler::bcondNotEqual, *op->stub()->entry()); // Use long branch, because slow_case might be far. 2374 } 2375 __ allocate_object(op->obj()->as_register(), 2376 op->tmp1()->as_register(), 2377 op->tmp2()->as_register(), 2378 op->header_size(), 2379 op->object_size(), 2380 op->klass()->as_register(), 2381 *op->stub()->entry()); 2382 __ bind(*op->stub()->continuation()); 2383 __ verify_oop(op->obj()->as_register(), FILE_AND_LINE); 2384 } 2385 2386 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 2387 Register len = op->len()->as_register(); 2388 __ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend 2389 2390 if (UseSlowPath || 2391 (!UseFastNewObjectArray && (is_reference_type(op->type()))) || 2392 (!UseFastNewTypeArray && (!is_reference_type(op->type())))) { 2393 __ z_brul(*op->stub()->entry()); 2394 } else { 2395 __ allocate_array(op->obj()->as_register(), 2396 op->len()->as_register(), 2397 op->tmp1()->as_register(), 2398 op->tmp2()->as_register(), 2399 arrayOopDesc::base_offset_in_bytes(op->type()), 2400 type2aelembytes(op->type()), 2401 op->klass()->as_register(), 2402 *op->stub()->entry(), 2403 op->zero_array()); 2404 } 2405 __ bind(*op->stub()->continuation()); 2406 } 2407 2408 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data, 2409 Register recv, Register tmp1, Label* update_done) { 2410 uint i; 2411 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2412 Label next_test; 2413 // See if the receiver is receiver[n]. 
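    // Each ReceiverTypeData row in the MDO is a (receiver klass, counter) pair.
    // This first pass compares the observed klass against every stored receiver and,
    // on a match, bumps that row's counter and jumps to update_done.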
2414 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 2415 __ z_cg(recv, receiver_addr); 2416 __ z_brne(next_test); 2417 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); 2418 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2419 __ branch_optimized(Assembler::bcondAlways, *update_done); 2420 __ bind(next_test); 2421 } 2422 2423 // Didn't find receiver; find next empty slot and fill it in. 2424 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2425 Label next_test; 2426 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 2427 __ z_ltg(Z_R0_scratch, recv_addr); 2428 __ z_brne(next_test); 2429 __ z_stg(recv, recv_addr); 2430 __ load_const_optimized(tmp1, DataLayout::counter_increment); 2431 __ z_stg(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)), mdo); 2432 __ branch_optimized(Assembler::bcondAlways, *update_done); 2433 __ bind(next_test); 2434 } 2435 } 2436 2437 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 2438 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { 2439 Unimplemented(); 2440 } 2441 2442 void LIR_Assembler::store_parameter(Register r, int param_num) { 2443 assert(param_num >= 0, "invalid num"); 2444 int offset_in_bytes = param_num * BytesPerWord; 2445 check_reserved_argument_area(offset_in_bytes); 2446 offset_in_bytes += FrameMap::first_available_sp_in_frame; 2447 __ z_stg(r, offset_in_bytes, Z_SP); 2448 } 2449 2450 void LIR_Assembler::store_parameter(jint c, int param_num) { 2451 assert(param_num >= 0, "invalid num"); 2452 int offset_in_bytes = param_num * BytesPerWord; 2453 check_reserved_argument_area(offset_in_bytes); 2454 offset_in_bytes += FrameMap::first_available_sp_in_frame; 2455 __ store_const(Address(Z_SP, offset_in_bytes), c, Z_R1_scratch, true); 2456 } 2457 2458 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 2459 // We always need a stub for the failure case. 2460 CodeStub* stub = op->stub(); 2461 Register obj = op->object()->as_register(); 2462 Register k_RInfo = op->tmp1()->as_register(); 2463 Register klass_RInfo = op->tmp2()->as_register(); 2464 Register dst = op->result_opr()->as_register(); 2465 Register Rtmp1 = Z_R1_scratch; 2466 ciKlass* k = op->klass(); 2467 2468 assert(!op->tmp3()->is_valid(), "tmp3's not needed"); 2469 2470 // Check if it needs to be profiled. 2471 ciMethodData* md = nullptr; 2472 ciProfileData* data = nullptr; 2473 2474 if (op->should_profile()) { 2475 ciMethod* method = op->profiled_method(); 2476 assert(method != nullptr, "Should have method"); 2477 int bci = op->profiled_bci(); 2478 md = method->method_data_or_null(); 2479 assert(md != nullptr, "Sanity"); 2480 data = md->bci_to_data(bci); 2481 assert(data != nullptr, "need data for type check"); 2482 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2483 } 2484 2485 // Temp operands do not overlap with inputs, if this is their last 2486 // use (end of range is exclusive), so a register conflict is possible. 
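  // If the allocator happened to reuse obj's register for one of the temps, redirect
  // that temp to the result register so that obj, k_RInfo and klass_RInfo end up in
  // distinct registers (checked by the assert below) before the klass loads clobber it.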
2487 if (obj == k_RInfo) { 2488 k_RInfo = dst; 2489 } else if (obj == klass_RInfo) { 2490 klass_RInfo = dst; 2491 } 2492 assert_different_registers(obj, k_RInfo, klass_RInfo); 2493 2494 if (op->should_profile()) { 2495 Register mdo = klass_RInfo; 2496 metadata2reg(md->constant_encoding(), mdo); 2497 NearLabel not_null; 2498 __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondNotEqual, not_null); 2499 // Object is null; update MDO and exit. 2500 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 2501 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 2502 __ or2mem_8(data_addr, header_bits); 2503 __ branch_optimized(Assembler::bcondAlways, *obj_is_null); 2504 __ bind(not_null); 2505 2506 NearLabel update_done; 2507 Register recv = k_RInfo; 2508 __ load_klass(recv, obj); 2509 type_profile_helper(mdo, md, data, recv, Rtmp1, &update_done); 2510 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2511 __ add2mem_64(counter_addr, DataLayout::counter_increment, Rtmp1); 2512 __ bind(update_done); 2513 } else { 2514 __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondEqual, *obj_is_null); 2515 } 2516 2517 Label *failure_target = failure; 2518 Label *success_target = success; 2519 2520 // Patching may screw with our temporaries, 2521 // so let's do it before loading the class. 2522 if (k->is_loaded()) { 2523 metadata2reg(k->constant_encoding(), k_RInfo); 2524 } else { 2525 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 2526 } 2527 assert(obj != k_RInfo, "must be different"); 2528 2529 __ verify_oop(obj, FILE_AND_LINE); 2530 2531 // Get object class. 2532 // Not a safepoint as obj null check happens earlier. 2533 if (op->fast_check()) { 2534 if (UseCompressedClassPointers) { 2535 __ load_klass(klass_RInfo, obj); 2536 __ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target); 2537 } else { 2538 __ z_cg(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 2539 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2540 } 2541 // Successful cast, fall through to profile or jump. 2542 } else { 2543 bool need_slow_path = !k->is_loaded() || 2544 ((int) k->super_check_offset() == in_bytes(Klass::secondary_super_cache_offset())); 2545 __ load_klass(klass_RInfo, obj); 2546 // Perform the fast part of the checking logic. 2547 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, 2548 (need_slow_path ? success_target : nullptr), 2549 failure_target, nullptr); 2550 if (need_slow_path) { 2551 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2552 address a = Runtime1::entry_for (C1StubId::slow_subtype_check_id); 2553 store_parameter(klass_RInfo, 0); // sub 2554 store_parameter(k_RInfo, 1); // super 2555 emit_call_c(a); // Sets condition code 0 for match (2 otherwise). 2556 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2557 // Fall through to success case. 
2558 } 2559 } 2560 2561 __ branch_optimized(Assembler::bcondAlways, *success); 2562 } 2563 2564 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 2565 LIR_Code code = op->code(); 2566 if (code == lir_store_check) { 2567 Register value = op->object()->as_register(); 2568 Register array = op->array()->as_register(); 2569 Register k_RInfo = op->tmp1()->as_register(); 2570 Register klass_RInfo = op->tmp2()->as_register(); 2571 Register Rtmp1 = Z_R1_scratch; 2572 2573 CodeStub* stub = op->stub(); 2574 2575 // Check if it needs to be profiled. 2576 ciMethodData* md = nullptr; 2577 ciProfileData* data = nullptr; 2578 2579 assert_different_registers(value, k_RInfo, klass_RInfo); 2580 2581 if (op->should_profile()) { 2582 ciMethod* method = op->profiled_method(); 2583 assert(method != nullptr, "Should have method"); 2584 int bci = op->profiled_bci(); 2585 md = method->method_data_or_null(); 2586 assert(md != nullptr, "Sanity"); 2587 data = md->bci_to_data(bci); 2588 assert(data != nullptr, "need data for type check"); 2589 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2590 } 2591 NearLabel done; 2592 Label *success_target = &done; 2593 Label *failure_target = stub->entry(); 2594 2595 if (op->should_profile()) { 2596 Register mdo = klass_RInfo; 2597 metadata2reg(md->constant_encoding(), mdo); 2598 NearLabel not_null; 2599 __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondNotEqual, not_null); 2600 // Object is null; update MDO and exit. 2601 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 2602 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 2603 __ or2mem_8(data_addr, header_bits); 2604 __ branch_optimized(Assembler::bcondAlways, done); 2605 __ bind(not_null); 2606 2607 NearLabel update_done; 2608 Register recv = k_RInfo; 2609 __ load_klass(recv, value); 2610 type_profile_helper(mdo, md, data, recv, Rtmp1, &update_done); 2611 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2612 __ add2mem_64(counter_addr, DataLayout::counter_increment, Rtmp1); 2613 __ bind(update_done); 2614 } else { 2615 __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondEqual, done); 2616 } 2617 2618 add_debug_info_for_null_check_here(op->info_for_exception()); 2619 __ load_klass(k_RInfo, array); 2620 __ load_klass(klass_RInfo, value); 2621 2622 // Get instance klass (it's already uncompressed). 2623 __ z_lg(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 2624 // Perform the fast part of the checking logic. 2625 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 2626 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2627 address a = Runtime1::entry_for (C1StubId::slow_subtype_check_id); 2628 store_parameter(klass_RInfo, 0); // sub 2629 store_parameter(k_RInfo, 1); // super 2630 emit_call_c(a); // Sets condition code 0 for match (2 otherwise). 2631 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2632 // Fall through to success case. 
2633 2634 __ bind(done); 2635 } else { 2636 if (code == lir_checkcast) { 2637 Register obj = op->object()->as_register(); 2638 Register dst = op->result_opr()->as_register(); 2639 NearLabel success; 2640 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 2641 __ bind(success); 2642 __ lgr_if_needed(dst, obj); 2643 } else { 2644 if (code == lir_instanceof) { 2645 Register obj = op->object()->as_register(); 2646 Register dst = op->result_opr()->as_register(); 2647 NearLabel success, failure, done; 2648 emit_typecheck_helper(op, &success, &failure, &failure); 2649 __ bind(failure); 2650 __ clear_reg(dst); 2651 __ branch_optimized(Assembler::bcondAlways, done); 2652 __ bind(success); 2653 __ load_const_optimized(dst, 1); 2654 __ bind(done); 2655 } else { 2656 ShouldNotReachHere(); 2657 } 2658 } 2659 } 2660 } 2661 2662 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2663 Register addr = op->addr()->as_pointer_register(); 2664 Register t1_cmp = Z_R1_scratch; 2665 if (op->code() == lir_cas_long) { 2666 Register cmp_value_lo = op->cmp_value()->as_register_lo(); 2667 Register new_value_lo = op->new_value()->as_register_lo(); 2668 __ z_lgr(t1_cmp, cmp_value_lo); 2669 // Perform the compare and swap operation. 2670 __ z_csg(t1_cmp, new_value_lo, 0, addr); 2671 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { 2672 Register cmp_value = op->cmp_value()->as_register(); 2673 Register new_value = op->new_value()->as_register(); 2674 if (op->code() == lir_cas_obj) { 2675 if (UseCompressedOops) { 2676 t1_cmp = op->tmp1()->as_register(); 2677 Register t2_new = op->tmp2()->as_register(); 2678 assert_different_registers(cmp_value, new_value, addr, t1_cmp, t2_new); 2679 __ oop_encoder(t1_cmp, cmp_value, true /*maybe null*/); 2680 __ oop_encoder(t2_new, new_value, true /*maybe null*/); 2681 __ z_cs(t1_cmp, t2_new, 0, addr); 2682 } else { 2683 __ z_lgr(t1_cmp, cmp_value); 2684 __ z_csg(t1_cmp, new_value, 0, addr); 2685 } 2686 } else { 2687 __ z_lr(t1_cmp, cmp_value); 2688 __ z_cs(t1_cmp, new_value, 0, addr); 2689 } 2690 } else { 2691 ShouldNotReachHere(); // new lir_cas_?? 2692 } 2693 } 2694 2695 void LIR_Assembler::breakpoint() { 2696 Unimplemented(); 2697 // __ breakpoint_trap(); 2698 } 2699 2700 void LIR_Assembler::push(LIR_Opr opr) { 2701 ShouldNotCallThis(); // unused 2702 } 2703 2704 void LIR_Assembler::pop(LIR_Opr opr) { 2705 ShouldNotCallThis(); // unused 2706 } 2707 2708 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2709 Address addr = frame_map()->address_for_monitor_lock(monitor_no); 2710 __ add2reg(dst_opr->as_register(), addr.disp(), addr.base()); 2711 } 2712 2713 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2714 Register obj = op->obj_opr()->as_register(); // May not be an oop. 2715 Register hdr = op->hdr_opr()->as_register(); 2716 Register lock = op->lock_opr()->as_register(); 2717 if (LockingMode == LM_MONITOR) { 2718 if (op->info() != nullptr) { 2719 add_debug_info_for_null_check_here(op->info()); 2720 __ null_check(obj); 2721 } 2722 __ branch_optimized(Assembler::bcondAlways, *op->stub()->entry()); 2723 } else if (op->code() == lir_lock) { 2724 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2725 // Add debug info for NullPointerException only if one is possible. 
2726 if (op->info() != nullptr) { 2727 add_debug_info_for_null_check_here(op->info()); 2728 } 2729 __ lock_object(hdr, obj, lock, *op->stub()->entry()); 2730 // done 2731 } else if (op->code() == lir_unlock) { 2732 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2733 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2734 } else { 2735 ShouldNotReachHere(); 2736 } 2737 __ bind(*op->stub()->continuation()); 2738 } 2739 2740 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 2741 Register obj = op->obj()->as_pointer_register(); 2742 Register result = op->result_opr()->as_pointer_register(); 2743 2744 CodeEmitInfo* info = op->info(); 2745 if (info != nullptr) { 2746 add_debug_info_for_null_check_here(info); 2747 } 2748 2749 __ load_klass(result, obj); 2750 } 2751 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2752 ciMethod* method = op->profiled_method(); 2753 int bci = op->profiled_bci(); 2754 ciMethod* callee = op->profiled_callee(); 2755 2756 // Update counter for all call types. 2757 ciMethodData* md = method->method_data_or_null(); 2758 assert(md != nullptr, "Sanity"); 2759 ciProfileData* data = md->bci_to_data(bci); 2760 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); 2761 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2762 Register mdo = op->mdo()->as_register(); 2763 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 2764 Register tmp1 = op->tmp1()->as_register_lo(); 2765 metadata2reg(md->constant_encoding(), mdo); 2766 2767 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2768 // Perform additional virtual call profiling for invokevirtual and 2769 // invokeinterface bytecodes 2770 if (op->should_profile_receiver_type()) { 2771 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2772 Register recv = op->recv()->as_register(); 2773 assert_different_registers(mdo, tmp1, recv); 2774 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2775 ciKlass* known_klass = op->known_holder(); 2776 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { 2777 // We know the type that will be seen at this call site; we can 2778 // statically update the MethodData* rather than needing to do 2779 // dynamic tests on the receiver type. 2780 2781 // NOTE: we should probably put a lock around this search to 2782 // avoid collisions by concurrent compilations. 2783 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2784 uint i; 2785 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2786 ciKlass* receiver = vc_data->receiver(i); 2787 if (known_klass->equals(receiver)) { 2788 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2789 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2790 return; 2791 } 2792 } 2793 2794 // Receiver type not found in profile data. Select an empty slot. 2795 2796 // Note that this is less efficient than it should be because it 2797 // always does a write to the receiver part of the 2798 // VirtualCallData rather than just the first time. 
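        // That is, the emitted code stores known_klass into the chosen row on every
        // execution of the call site, not only when the row is first claimed; for this
        // compiled call site the repeated stores are redundant but write the same value.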
2799 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2800 ciKlass* receiver = vc_data->receiver(i); 2801 if (receiver == nullptr) { 2802 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 2803 metadata2reg(known_klass->constant_encoding(), tmp1); 2804 __ z_stg(tmp1, recv_addr); 2805 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2806 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2807 return; 2808 } 2809 } 2810 } else { 2811 __ load_klass(recv, recv); 2812 NearLabel update_done; 2813 type_profile_helper(mdo, md, data, recv, tmp1, &update_done); 2814 // Receiver did not match any saved receiver and there is no empty row for it. 2815 // Increment total counter to indicate polymorphic case. 2816 __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1); 2817 __ bind(update_done); 2818 } 2819 } else { 2820 // static call 2821 __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1); 2822 } 2823 } 2824 2825 void LIR_Assembler::align_backward_branch_target() { 2826 __ align(OptoLoopAlignment); 2827 } 2828 2829 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { 2830 ShouldNotCallThis(); // There are no delay slots on ZARCH_64. 2831 } 2832 2833 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2834 // tmp must be unused 2835 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2836 assert(left->is_register(), "can only handle registers"); 2837 2838 if (left->is_single_cpu()) { 2839 __ z_lcr(dest->as_register(), left->as_register()); 2840 } else if (left->is_single_fpu()) { 2841 __ z_lcebr(dest->as_float_reg(), left->as_float_reg()); 2842 } else if (left->is_double_fpu()) { 2843 __ z_lcdbr(dest->as_double_reg(), left->as_double_reg()); 2844 } else { 2845 assert(left->is_double_cpu(), "Must be a long"); 2846 __ z_lcgr(dest->as_register_lo(), left->as_register_lo()); 2847 } 2848 } 2849 2850 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 2851 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2852 assert(!tmp->is_valid(), "don't need temporary"); 2853 emit_call_c(dest); 2854 CHECK_BAILOUT(); 2855 if (info != nullptr) { 2856 add_call_info_here(info); 2857 } 2858 } 2859 2860 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2861 ShouldNotCallThis(); // not needed on ZARCH_64 2862 } 2863 2864 void LIR_Assembler::membar() { 2865 __ z_fence(); 2866 } 2867 2868 void LIR_Assembler::membar_acquire() { 2869 __ z_acquire(); 2870 } 2871 2872 void LIR_Assembler::membar_release() { 2873 __ z_release(); 2874 } 2875 2876 void LIR_Assembler::membar_loadload() { 2877 __ z_acquire(); 2878 } 2879 2880 void LIR_Assembler::membar_storestore() { 2881 __ z_release(); 2882 } 2883 2884 void LIR_Assembler::membar_loadstore() { 2885 __ z_acquire(); 2886 } 2887 2888 void LIR_Assembler::membar_storeload() { 2889 __ z_fence(); 2890 } 2891 2892 void LIR_Assembler::on_spin_wait() { 2893 Unimplemented(); 2894 } 2895 2896 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2897 assert(patch_code == lir_patch_none, "Patch code not supported"); 2898 LIR_Address* addr = addr_opr->as_address_ptr(); 2899 assert(addr->scale() == LIR_Address::times_1, "scaling unsupported"); 2900 __ load_address(dest->as_pointer_register(), as_Address(addr)); 2901 } 2902 2903 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2904 ShouldNotCallThis(); // unused 2905 } 2906 
2907 #ifdef ASSERT 2908 // Emit run-time assertion. 2909 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2910 Unimplemented(); 2911 } 2912 #endif 2913 2914 void LIR_Assembler::peephole(LIR_List*) { 2915 // Do nothing for now. 2916 } 2917 2918 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 2919 assert(code == lir_xadd, "lir_xchg not supported"); 2920 Address src_addr = as_Address(src->as_address_ptr()); 2921 Register base = src_addr.base(); 2922 intptr_t disp = src_addr.disp(); 2923 if (src_addr.index()->is_valid()) { 2924 // LAA and LAAG do not support index register. 2925 __ load_address(Z_R1_scratch, src_addr); 2926 base = Z_R1_scratch; 2927 disp = 0; 2928 } 2929 if (data->type() == T_INT) { 2930 __ z_laa(dest->as_register(), data->as_register(), disp, base); 2931 } else if (data->type() == T_LONG) { 2932 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register"); 2933 __ z_laag(dest->as_register_lo(), data->as_register_lo(), disp, base); 2934 } else { 2935 ShouldNotReachHere(); 2936 } 2937 } 2938 2939 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2940 Register obj = op->obj()->as_register(); 2941 Register tmp1 = op->tmp()->as_pointer_register(); 2942 Register tmp2 = Z_R1_scratch; 2943 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 2944 ciKlass* exact_klass = op->exact_klass(); 2945 intptr_t current_klass = op->current_klass(); 2946 bool not_null = op->not_null(); 2947 bool no_conflict = op->no_conflict(); 2948 2949 Label update, next, none, null_seen, init_klass; 2950 2951 bool do_null = !not_null; 2952 bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 2953 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 2954 2955 assert(do_null || do_update, "why are we here?"); 2956 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 2957 2958 __ verify_oop(obj, FILE_AND_LINE); 2959 2960 if (do_null || tmp1 != obj DEBUG_ONLY(|| true)) { 2961 __ z_ltgr(tmp1, obj); 2962 } 2963 if (do_null) { 2964 __ z_brnz(update); 2965 if (!TypeEntries::was_null_seen(current_klass)) { 2966 __ z_lg(tmp1, mdo_addr); 2967 __ z_oill(tmp1, TypeEntries::null_seen); 2968 __ z_stg(tmp1, mdo_addr); 2969 } 2970 if (do_update) { 2971 __ z_bru(next); 2972 } 2973 } else { 2974 __ asm_assert(Assembler::bcondNotZero, "unexpected null obj", __LINE__); 2975 } 2976 2977 __ bind(update); 2978 2979 if (do_update) { 2980 #ifdef ASSERT 2981 if (exact_klass != nullptr) { 2982 __ load_klass(tmp1, tmp1); 2983 metadata2reg(exact_klass->constant_encoding(), tmp2); 2984 __ z_cgr(tmp1, tmp2); 2985 __ asm_assert(Assembler::bcondEqual, "exact klass and actual klass differ", __LINE__); 2986 } 2987 #endif 2988 2989 Label do_update; 2990 __ z_lg(tmp2, mdo_addr); 2991 2992 if (!no_conflict) { 2993 if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) { 2994 if (exact_klass != nullptr) { 2995 metadata2reg(exact_klass->constant_encoding(), tmp1); 2996 } else { 2997 __ load_klass(tmp1, tmp1); 2998 } 2999 3000 // Klass seen before: nothing to do (regardless of unknown bit). 
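    // The MDO type cell packs the klass pointer into the upper bits and keeps the
    // TypeEntries flag bits (null_seen, type_unknown) in the low bits; masking with
    // type_klass_mask strips the flags so the stored klass can be compared directly.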
3001 __ z_lgr(Z_R0_scratch, tmp2); 3002 assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction"); 3003 __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF); 3004 __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next); 3005 3006 // Already unknown: Nothing to do anymore. 3007 __ z_tmll(tmp2, TypeEntries::type_unknown); 3008 __ z_brc(Assembler::bcondAllOne, next); 3009 3010 if (TypeEntries::is_type_none(current_klass)) { 3011 __ z_lgr(Z_R0_scratch, tmp2); 3012 assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction"); 3013 __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF); 3014 __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, init_klass); 3015 } 3016 } else { 3017 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3018 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3019 3020 // Already unknown: Nothing to do anymore. 3021 __ z_tmll(tmp2, TypeEntries::type_unknown); 3022 __ z_brc(Assembler::bcondAllOne, next); 3023 } 3024 3025 // Different than before. Cannot keep accurate profile. 3026 __ z_oill(tmp2, TypeEntries::type_unknown); 3027 __ z_bru(do_update); 3028 } else { 3029 // There's a single possible klass at this profile point. 3030 assert(exact_klass != nullptr, "should be"); 3031 if (TypeEntries::is_type_none(current_klass)) { 3032 metadata2reg(exact_klass->constant_encoding(), tmp1); 3033 __ z_lgr(Z_R0_scratch, tmp2); 3034 assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction"); 3035 __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF); 3036 __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next); 3037 #ifdef ASSERT 3038 { 3039 Label ok; 3040 __ z_lgr(Z_R0_scratch, tmp2); 3041 assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction"); 3042 __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF); 3043 __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, ok); 3044 __ stop("unexpected profiling mismatch"); 3045 __ bind(ok); 3046 } 3047 #endif 3048 3049 } else { 3050 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3051 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3052 3053 // Already unknown: Nothing to do anymore. 3054 __ z_tmll(tmp2, TypeEntries::type_unknown); 3055 __ z_brc(Assembler::bcondAllOne, next); 3056 __ z_oill(tmp2, TypeEntries::type_unknown); 3057 __ z_bru(do_update); 3058 } 3059 } 3060 3061 __ bind(init_klass); 3062 // Combine klass and null_seen bit (only used if (tmp & type_mask)==0). 3063 __ z_ogr(tmp2, tmp1); 3064 3065 __ bind(do_update); 3066 __ z_stg(tmp2, mdo_addr); 3067 3068 __ bind(next); 3069 } 3070 } 3071 3072 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3073 assert(op->crc()->is_single_cpu(), "crc must be register"); 3074 assert(op->val()->is_single_cpu(), "byte value must be register"); 3075 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3076 Register crc = op->crc()->as_register(); 3077 Register val = op->val()->as_register(); 3078 Register res = op->result_opr()->as_register(); 3079 3080 assert_different_registers(val, crc, res); 3081 3082 __ load_const_optimized(res, StubRoutines::crc_table_addr()); 3083 __ kernel_crc32_singleByteReg(crc, val, res, true); 3084 __ z_lgfr(res, crc); 3085 } 3086 3087 #undef __