1 /* 2 * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2016, 2024 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "c1/c1_Compilation.hpp" 29 #include "c1/c1_LIRAssembler.hpp" 30 #include "c1/c1_MacroAssembler.hpp" 31 #include "c1/c1_Runtime1.hpp" 32 #include "c1/c1_ValueStack.hpp" 33 #include "ci/ciArrayKlass.hpp" 34 #include "ci/ciInstance.hpp" 35 #include "gc/shared/collectedHeap.hpp" 36 #include "memory/universe.hpp" 37 #include "nativeInst_s390.hpp" 38 #include "oops/objArrayKlass.hpp" 39 #include "runtime/frame.inline.hpp" 40 #include "runtime/safepointMechanism.inline.hpp" 41 #include "runtime/sharedRuntime.hpp" 42 #include "runtime/stubRoutines.hpp" 43 #include "utilities/macros.hpp" 44 #include "utilities/powerOfTwo.hpp" 45 #include "vmreg_s390.inline.hpp" 46 47 #define __ _masm-> 48 49 #ifndef PRODUCT 50 #undef __ 51 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm) : _masm)-> 52 #endif 53 54 //------------------------------------------------------------ 55 56 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { 57 // Not used on ZARCH_64 58 ShouldNotCallThis(); 59 return false; 60 } 61 62 LIR_Opr LIR_Assembler::receiverOpr() { 63 return FrameMap::Z_R2_oop_opr; 64 } 65 66 LIR_Opr LIR_Assembler::osrBufferPointer() { 67 return FrameMap::Z_R2_opr; 68 } 69 70 int LIR_Assembler::initial_frame_size_in_bytes() const { 71 return in_bytes(frame_map()->framesize_in_bytes()); 72 } 73 74 // Inline cache check: done before the frame is built. 75 // The inline cached class is in Z_inline_cache(Z_R9). 76 // We fetch the class of the receiver and compare it with the cached class. 77 // If they do not match we jump to the slow case. 78 int LIR_Assembler::check_icache() { 79 return __ ic_check(CodeEntryAlignment); 80 } 81 82 void LIR_Assembler::clinit_barrier(ciMethod* method) { 83 assert(!method->holder()->is_not_initialized(), "initialization should have been started"); 84 85 Label L_skip_barrier; 86 Register klass = Z_R1_scratch; 87 88 metadata2reg(method->holder()->constant_encoding(), klass); 89 __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/); 90 91 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub()); 92 __ z_br(klass); 93 94 __ bind(L_skip_barrier); 95 } 96 97 void LIR_Assembler::osr_entry() { 98 // On-stack-replacement entry sequence (interpreter frame layout described in frame_s390.hpp): 99 // 100 // 1. 
Create a new compiled activation. 101 // 2. Initialize local variables in the compiled activation. The expression stack must be empty 102 // at the osr_bci; it is not initialized. 103 // 3. Jump to the continuation address in compiled code to resume execution. 104 105 // OSR entry point 106 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); 107 BlockBegin* osr_entry = compilation()->hir()->osr_entry(); 108 ValueStack* entry_state = osr_entry->end()->state(); 109 int number_of_locks = entry_state->locks_size(); 110 111 // Create a frame for the compiled activation. 112 __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes()); 113 114 // OSR buffer is 115 // 116 // locals[nlocals-1..0] 117 // monitors[number_of_locks-1..0] 118 // 119 // Locals is a direct copy of the interpreter frame so in the osr buffer 120 // the first slot in the local array is the last local from the interpreter 121 // and the last slot is local[0] (receiver) from the interpreter 122 // 123 // Similarly with locks. The first lock slot in the osr buffer is the nth lock 124 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock 125 // in the interpreter frame (the method lock if a sync method) 126 127 // Initialize monitors in the compiled activation. 128 // I0: pointer to osr buffer 129 // 130 // All other registers are dead at this point and the locals will be 131 // copied into place by code emitted in the IR. 132 133 Register OSR_buf = osrBufferPointer()->as_register(); 134 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); 135 int monitor_offset = BytesPerWord * method()->max_locals() + 136 (2 * BytesPerWord) * (number_of_locks - 1); 137 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in 138 // the OSR buffer using 2 word entries: first the lock and then 139 // the oop. 140 for (int i = 0; i < number_of_locks; i++) { 141 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); 142 // Verify the interpreter's monitor has a non-null object. 143 __ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is null", __LINE__); 144 // Copy the lock field into the compiled activation. 145 __ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf); 146 __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i)); 147 __ z_lg(Z_R1_scratch, slot_offset + 1*BytesPerWord, OSR_buf); 148 __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_object(i)); 149 } 150 } 151 } 152 153 // -------------------------------------------------------------------------------------------- 154 155 address LIR_Assembler::emit_call_c(address a) { 156 __ align_call_far_patchable(__ pc()); 157 address call_addr = __ call_c_opt(a); 158 if (call_addr == nullptr) { 159 bailout("const section overflow"); 160 } 161 return call_addr; 162 } 163 164 int LIR_Assembler::emit_exception_handler() { 165 // Generate code for exception handler. 166 address handler_base = __ start_a_stub(exception_handler_size()); 167 if (handler_base == nullptr) { 168 // Not enough space left for the handler. 
169 bailout("exception handler overflow"); 170 return -1; 171 } 172 173 int offset = code_offset(); 174 175 address a = Runtime1::entry_for (C1StubId::handle_exception_from_callee_id); 176 address call_addr = emit_call_c(a); 177 CHECK_BAILOUT_(-1); 178 __ should_not_reach_here(); 179 guarantee(code_offset() - offset <= exception_handler_size(), "overflow"); 180 __ end_a_stub(); 181 182 return offset; 183 } 184 185 // Emit the code to remove the frame from the stack in the exception 186 // unwind path. 187 int LIR_Assembler::emit_unwind_handler() { 188 #ifndef PRODUCT 189 if (CommentedAssembly) { 190 _masm->block_comment("Unwind handler"); 191 } 192 #endif 193 194 int offset = code_offset(); 195 Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved. 196 Register Rtmp1 = Z_R11; 197 Register Rtmp2 = Z_R12; 198 199 // Fetch the exception from TLS and clear out exception related thread state. 200 Address exc_oop_addr = Address(Z_thread, JavaThread::exception_oop_offset()); 201 Address exc_pc_addr = Address(Z_thread, JavaThread::exception_pc_offset()); 202 __ z_lg(Z_EXC_OOP, exc_oop_addr); 203 __ clear_mem(exc_oop_addr, sizeof(oop)); 204 __ clear_mem(exc_pc_addr, sizeof(intptr_t)); 205 206 __ bind(_unwind_handler_entry); 207 __ verify_not_null_oop(Z_EXC_OOP); 208 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 209 __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP); // Preserve the exception. 210 } 211 212 // Perform needed unlocking. 213 MonitorExitStub* stub = nullptr; 214 if (method()->is_synchronized()) { 215 // C1StubId::monitorexit_id expects lock address in Z_R1_scratch. 216 LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch); 217 monitor_address(0, lock); 218 stub = new MonitorExitStub(lock, true, 0); 219 if (LockingMode == LM_MONITOR) { 220 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 221 } else { 222 __ unlock_object(Rtmp1, Rtmp2, lock->as_register(), *stub->entry()); 223 } 224 __ bind(*stub->continuation()); 225 } 226 227 if (compilation()->env()->dtrace_method_probes()) { 228 ShouldNotReachHere(); // Not supported. 229 #if 0 230 __ mov(rdi, r15_thread); 231 __ mov_metadata(rsi, method()->constant_encoding()); 232 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit))); 233 #endif 234 } 235 236 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 237 __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved); // Restore the exception. 238 } 239 240 // Remove the activation and dispatch to the unwind handler. 241 __ pop_frame(); 242 __ z_lg(Z_EXC_PC, _z_common_abi(return_pc), Z_SP); 243 244 // Z_EXC_OOP: exception oop 245 // Z_EXC_PC: exception pc 246 247 // Dispatch to the unwind logic. 248 __ load_const_optimized(Z_R5, Runtime1::entry_for (C1StubId::unwind_exception_id)); 249 __ z_br(Z_R5); 250 251 // Emit the slow path assembly. 252 if (stub != nullptr) { 253 stub->emit_code(this); 254 } 255 256 return offset; 257 } 258 259 int LIR_Assembler::emit_deopt_handler() { 260 // Generate code for exception handler. 261 address handler_base = __ start_a_stub(deopt_handler_size()); 262 if (handler_base == nullptr) { 263 // Not enough space left for the handler. 264 bailout("deopt handler overflow"); 265 return -1; 266 } int offset = code_offset(); 267 // Size must be constant (see HandlerImpl::emit_deopt_handler). 
  __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
  __ call(Z_R1_scratch);
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == nullptr) {
    __ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove.
  } else {
    AddressLiteral a = __ allocate_oop_address(o);
    bool success = __ load_oop_from_toc(reg, a, reg);
    if (!success) {
      bailout("const section overflow");
    }
  }
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(nullptr);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // The null will be dynamically patched later so the sequence to
  // load the address literal must not be optimized.
  __ load_const(reg, addrlit);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::metadata2reg(Metadata* md, Register reg) {
  bool success = __ set_metadata_constant(md, reg);
  if (!success) {
    bailout("const section overflow");
    return;
  }
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(nullptr);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // The null will be dynamically patched later so the sequence to
  // load the address literal must not be optimized.
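  // Illustrative note (assumption based on MacroAssembler::load_const): the
  // unoptimized load emits the full-width immediate pair, so the PatchingStub
  // can later rewrite the immediate fields in place once the klass has been
  // resolved; a shortened encoding of the placeholder 0 would break that.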
318 __ load_const(reg, addrlit); 319 320 patching_epilog(patch, lir_patch_normal, reg, info); 321 } 322 323 void LIR_Assembler::emit_op3(LIR_Op3* op) { 324 switch (op->code()) { 325 case lir_idiv: 326 case lir_irem: 327 arithmetic_idiv(op->code(), 328 op->in_opr1(), 329 op->in_opr2(), 330 op->in_opr3(), 331 op->result_opr(), 332 op->info()); 333 break; 334 case lir_fmad: { 335 const FloatRegister opr1 = op->in_opr1()->as_double_reg(), 336 opr2 = op->in_opr2()->as_double_reg(), 337 opr3 = op->in_opr3()->as_double_reg(), 338 res = op->result_opr()->as_double_reg(); 339 __ z_madbr(opr3, opr1, opr2); 340 if (res != opr3) { __ z_ldr(res, opr3); } 341 } break; 342 case lir_fmaf: { 343 const FloatRegister opr1 = op->in_opr1()->as_float_reg(), 344 opr2 = op->in_opr2()->as_float_reg(), 345 opr3 = op->in_opr3()->as_float_reg(), 346 res = op->result_opr()->as_float_reg(); 347 __ z_maebr(opr3, opr1, opr2); 348 if (res != opr3) { __ z_ler(res, opr3); } 349 } break; 350 default: ShouldNotReachHere(); break; 351 } 352 } 353 354 355 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { 356 #ifdef ASSERT 357 assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label"); 358 if (op->block() != nullptr) { _branch_target_blocks.append(op->block()); } 359 if (op->ublock() != nullptr) { _branch_target_blocks.append(op->ublock()); } 360 #endif 361 362 if (op->cond() == lir_cond_always) { 363 if (op->info() != nullptr) { add_debug_info_for_branch(op->info()); } 364 __ branch_optimized(Assembler::bcondAlways, *(op->label())); 365 } else { 366 Assembler::branch_condition acond = Assembler::bcondZero; 367 if (op->code() == lir_cond_float_branch) { 368 assert(op->ublock() != nullptr, "must have unordered successor"); 369 __ branch_optimized(Assembler::bcondNotOrdered, *(op->ublock()->label())); 370 } 371 switch (op->cond()) { 372 case lir_cond_equal: acond = Assembler::bcondEqual; break; 373 case lir_cond_notEqual: acond = Assembler::bcondNotEqual; break; 374 case lir_cond_less: acond = Assembler::bcondLow; break; 375 case lir_cond_lessEqual: acond = Assembler::bcondNotHigh; break; 376 case lir_cond_greaterEqual: acond = Assembler::bcondNotLow; break; 377 case lir_cond_greater: acond = Assembler::bcondHigh; break; 378 case lir_cond_belowEqual: acond = Assembler::bcondNotHigh; break; 379 case lir_cond_aboveEqual: acond = Assembler::bcondNotLow; break; 380 default: ShouldNotReachHere(); 381 } 382 __ branch_optimized(acond,*(op->label())); 383 } 384 } 385 386 387 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { 388 LIR_Opr src = op->in_opr(); 389 LIR_Opr dest = op->result_opr(); 390 391 switch (op->bytecode()) { 392 case Bytecodes::_i2l: 393 __ move_reg_if_needed(dest->as_register_lo(), T_LONG, src->as_register(), T_INT); 394 break; 395 396 case Bytecodes::_l2i: 397 __ move_reg_if_needed(dest->as_register(), T_INT, src->as_register_lo(), T_LONG); 398 break; 399 400 case Bytecodes::_i2b: 401 __ move_reg_if_needed(dest->as_register(), T_BYTE, src->as_register(), T_INT); 402 break; 403 404 case Bytecodes::_i2c: 405 __ move_reg_if_needed(dest->as_register(), T_CHAR, src->as_register(), T_INT); 406 break; 407 408 case Bytecodes::_i2s: 409 __ move_reg_if_needed(dest->as_register(), T_SHORT, src->as_register(), T_INT); 410 break; 411 412 case Bytecodes::_f2d: 413 assert(dest->is_double_fpu(), "check"); 414 __ move_freg_if_needed(dest->as_double_reg(), T_DOUBLE, src->as_float_reg(), T_FLOAT); 415 break; 416 417 case Bytecodes::_d2f: 418 assert(dest->is_single_fpu(), "check"); 419 __ 
move_freg_if_needed(dest->as_float_reg(), T_FLOAT, src->as_double_reg(), T_DOUBLE); 420 break; 421 422 case Bytecodes::_i2f: 423 __ z_cefbr(dest->as_float_reg(), src->as_register()); 424 break; 425 426 case Bytecodes::_i2d: 427 __ z_cdfbr(dest->as_double_reg(), src->as_register()); 428 break; 429 430 case Bytecodes::_l2f: 431 __ z_cegbr(dest->as_float_reg(), src->as_register_lo()); 432 break; 433 case Bytecodes::_l2d: 434 __ z_cdgbr(dest->as_double_reg(), src->as_register_lo()); 435 break; 436 437 case Bytecodes::_f2i: 438 case Bytecodes::_f2l: { 439 Label done; 440 FloatRegister Rsrc = src->as_float_reg(); 441 Register Rdst = (op->bytecode() == Bytecodes::_f2i ? dest->as_register() : dest->as_register_lo()); 442 __ clear_reg(Rdst, true, false); 443 __ z_cebr(Rsrc, Rsrc); 444 __ z_brno(done); // NaN -> 0 445 if (op->bytecode() == Bytecodes::_f2i) { 446 __ z_cfebr(Rdst, Rsrc, Assembler::to_zero); 447 } else { // op->bytecode() == Bytecodes::_f2l 448 __ z_cgebr(Rdst, Rsrc, Assembler::to_zero); 449 } 450 __ bind(done); 451 } 452 break; 453 454 case Bytecodes::_d2i: 455 case Bytecodes::_d2l: { 456 Label done; 457 FloatRegister Rsrc = src->as_double_reg(); 458 Register Rdst = (op->bytecode() == Bytecodes::_d2i ? dest->as_register() : dest->as_register_lo()); 459 __ clear_reg(Rdst, true, false); // Don't set CC. 460 __ z_cdbr(Rsrc, Rsrc); 461 __ z_brno(done); // NaN -> 0 462 if (op->bytecode() == Bytecodes::_d2i) { 463 __ z_cfdbr(Rdst, Rsrc, Assembler::to_zero); 464 } else { // Bytecodes::_d2l 465 __ z_cgdbr(Rdst, Rsrc, Assembler::to_zero); 466 } 467 __ bind(done); 468 } 469 break; 470 471 default: ShouldNotReachHere(); 472 } 473 } 474 475 void LIR_Assembler::align_call(LIR_Code code) { 476 // End of call instruction must be 4 byte aligned. 477 int offset = __ offset(); 478 switch (code) { 479 case lir_icvirtual_call: 480 offset += MacroAssembler::load_const_from_toc_size(); 481 // no break 482 case lir_static_call: 483 case lir_optvirtual_call: 484 case lir_dynamic_call: 485 offset += NativeCall::call_far_pcrelative_displacement_offset; 486 break; 487 default: ShouldNotReachHere(); 488 } 489 if ((offset & (NativeCall::call_far_pcrelative_displacement_alignment-1)) != 0) { 490 __ nop(); 491 } 492 } 493 494 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 495 assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0, 496 "must be aligned (offset=%d)", __ offset()); 497 assert(rtype == relocInfo::none || 498 rtype == relocInfo::opt_virtual_call_type || 499 rtype == relocInfo::static_call_type, "unexpected rtype"); 500 // Prepend each BRASL with a nop. 501 __ relocate(rtype); 502 __ z_nop(); 503 __ z_brasl(Z_R14, op->addr()); 504 add_call_info(code_offset(), op->info()); 505 } 506 507 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 508 address virtual_call_oop_addr = nullptr; 509 AddressLiteral empty_ic((address) Universe::non_oop_word()); 510 virtual_call_oop_addr = __ pc(); 511 bool success = __ load_const_from_toc(Z_inline_cache, empty_ic); 512 if (!success) { 513 bailout("const section overflow"); 514 return; 515 } 516 517 // CALL to fixup routine. Fixup routine uses ScopeDesc info 518 // to determine who we intended to call. 
519 __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr)); 520 call(op, relocInfo::none); 521 } 522 523 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { 524 if (from_reg != to_reg) __ z_lgr(to_reg, from_reg); 525 } 526 527 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 528 assert(src->is_constant(), "should not call otherwise"); 529 assert(dest->is_stack(), "should not call otherwise"); 530 LIR_Const* c = src->as_constant_ptr(); 531 532 unsigned int lmem = 0; 533 unsigned int lcon = 0; 534 int64_t cbits = 0; 535 Address dest_addr; 536 switch (c->type()) { 537 case T_INT: // fall through 538 case T_FLOAT: 539 dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 540 lmem = 4; lcon = 4; cbits = c->as_jint_bits(); 541 break; 542 543 case T_ADDRESS: 544 dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 545 lmem = 8; lcon = 4; cbits = c->as_jint_bits(); 546 break; 547 548 case T_OBJECT: 549 dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 550 if (c->as_jobject() == nullptr) { 551 __ store_const(dest_addr, (int64_t)NULL_WORD, 8, 8); 552 } else { 553 jobject2reg(c->as_jobject(), Z_R1_scratch); 554 __ reg2mem_opt(Z_R1_scratch, dest_addr, true); 555 } 556 return; 557 558 case T_LONG: // fall through 559 case T_DOUBLE: 560 dest_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 561 lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits()); 562 break; 563 564 default: 565 ShouldNotReachHere(); 566 } 567 568 __ store_const(dest_addr, cbits, lmem, lcon); 569 } 570 571 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 572 assert(src->is_constant(), "should not call otherwise"); 573 assert(dest->is_address(), "should not call otherwise"); 574 575 LIR_Const* c = src->as_constant_ptr(); 576 Address addr = as_Address(dest->as_address_ptr()); 577 578 int store_offset = -1; 579 580 if (dest->as_address_ptr()->index()->is_valid()) { 581 switch (type) { 582 case T_INT: // fall through 583 case T_FLOAT: 584 __ load_const_optimized(Z_R0_scratch, c->as_jint_bits()); 585 store_offset = __ offset(); 586 if (Immediate::is_uimm12(addr.disp())) { 587 __ z_st(Z_R0_scratch, addr); 588 } else { 589 __ z_sty(Z_R0_scratch, addr); 590 } 591 break; 592 593 case T_ADDRESS: 594 __ load_const_optimized(Z_R1_scratch, c->as_jint_bits()); 595 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 596 break; 597 598 case T_OBJECT: // fall through 599 case T_ARRAY: 600 if (c->as_jobject() == nullptr) { 601 if (UseCompressedOops && !wide) { 602 __ clear_reg(Z_R1_scratch, false); 603 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false); 604 } else { 605 __ clear_reg(Z_R1_scratch, true); 606 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 607 } 608 } else { 609 jobject2reg(c->as_jobject(), Z_R1_scratch); 610 if (UseCompressedOops && !wide) { 611 __ encode_heap_oop(Z_R1_scratch); 612 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false); 613 } else { 614 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 615 } 616 } 617 assert(store_offset >= 0, "check"); 618 break; 619 620 case T_LONG: // fall through 621 case T_DOUBLE: 622 __ load_const_optimized(Z_R1_scratch, (int64_t)(c->as_jlong_bits())); 623 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 624 break; 625 626 case T_BOOLEAN: // fall through 627 case T_BYTE: 628 __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint())); 629 store_offset = __ offset(); 630 if 
(Immediate::is_uimm12(addr.disp())) { 631 __ z_stc(Z_R0_scratch, addr); 632 } else { 633 __ z_stcy(Z_R0_scratch, addr); 634 } 635 break; 636 637 case T_CHAR: // fall through 638 case T_SHORT: 639 __ load_const_optimized(Z_R0_scratch, (int16_t)(c->as_jint())); 640 store_offset = __ offset(); 641 if (Immediate::is_uimm12(addr.disp())) { 642 __ z_sth(Z_R0_scratch, addr); 643 } else { 644 __ z_sthy(Z_R0_scratch, addr); 645 } 646 break; 647 648 default: 649 ShouldNotReachHere(); 650 } 651 652 } else { // no index 653 654 unsigned int lmem = 0; 655 unsigned int lcon = 0; 656 int64_t cbits = 0; 657 658 switch (type) { 659 case T_INT: // fall through 660 case T_FLOAT: 661 lmem = 4; lcon = 4; cbits = c->as_jint_bits(); 662 break; 663 664 case T_ADDRESS: 665 lmem = 8; lcon = 4; cbits = c->as_jint_bits(); 666 break; 667 668 case T_OBJECT: // fall through 669 case T_ARRAY: 670 if (c->as_jobject() == nullptr) { 671 if (UseCompressedOops && !wide) { 672 store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4); 673 } else { 674 store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8); 675 } 676 } else { 677 jobject2reg(c->as_jobject(), Z_R1_scratch); 678 if (UseCompressedOops && !wide) { 679 __ encode_heap_oop(Z_R1_scratch); 680 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false); 681 } else { 682 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 683 } 684 } 685 assert(store_offset >= 0, "check"); 686 break; 687 688 case T_LONG: // fall through 689 case T_DOUBLE: 690 lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits()); 691 break; 692 693 case T_BOOLEAN: // fall through 694 case T_BYTE: 695 lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint()); 696 break; 697 698 case T_CHAR: // fall through 699 case T_SHORT: 700 lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint()); 701 break; 702 703 default: 704 ShouldNotReachHere(); 705 } 706 707 if (store_offset == -1) { 708 store_offset = __ store_const(addr, cbits, lmem, lcon); 709 assert(store_offset >= 0, "check"); 710 } 711 } 712 713 if (info != nullptr) { 714 add_debug_info_for_null_check(store_offset, info); 715 } 716 } 717 718 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 719 assert(src->is_constant(), "should not call otherwise"); 720 assert(dest->is_register(), "should not call otherwise"); 721 LIR_Const* c = src->as_constant_ptr(); 722 723 switch (c->type()) { 724 case T_INT: { 725 assert(patch_code == lir_patch_none, "no patching handled here"); 726 __ load_const_optimized(dest->as_register(), c->as_jint()); 727 break; 728 } 729 730 case T_ADDRESS: { 731 assert(patch_code == lir_patch_none, "no patching handled here"); 732 __ load_const_optimized(dest->as_register(), c->as_jint()); 733 break; 734 } 735 736 case T_LONG: { 737 assert(patch_code == lir_patch_none, "no patching handled here"); 738 __ load_const_optimized(dest->as_register_lo(), (intptr_t)c->as_jlong()); 739 break; 740 } 741 742 case T_OBJECT: { 743 if (patch_code != lir_patch_none) { 744 jobject2reg_with_patching(dest->as_register(), info); 745 } else { 746 jobject2reg(c->as_jobject(), dest->as_register()); 747 } 748 break; 749 } 750 751 case T_METADATA: { 752 if (patch_code != lir_patch_none) { 753 klass2reg_with_patching(dest->as_register(), info); 754 } else { 755 metadata2reg(c->as_metadata(), dest->as_register()); 756 } 757 break; 758 } 759 760 case T_FLOAT: { 761 Register toc_reg = Z_R1_scratch; 762 __ load_toc(toc_reg); 763 address const_addr = __ float_constant(c->as_jfloat()); 764 if (const_addr == 
nullptr) { 765 bailout("const section overflow"); 766 break; 767 } 768 int displ = const_addr - _masm->code()->consts()->start(); 769 if (dest->is_single_fpu()) { 770 __ z_ley(dest->as_float_reg(), displ, toc_reg); 771 } else { 772 assert(dest->is_single_cpu(), "Must be a cpu register."); 773 __ z_ly(dest->as_register(), displ, toc_reg); 774 } 775 } 776 break; 777 778 case T_DOUBLE: { 779 Register toc_reg = Z_R1_scratch; 780 __ load_toc(toc_reg); 781 address const_addr = __ double_constant(c->as_jdouble()); 782 if (const_addr == nullptr) { 783 bailout("const section overflow"); 784 break; 785 } 786 int displ = const_addr - _masm->code()->consts()->start(); 787 if (dest->is_double_fpu()) { 788 __ z_ldy(dest->as_double_reg(), displ, toc_reg); 789 } else { 790 assert(dest->is_double_cpu(), "Must be a long register."); 791 __ z_lg(dest->as_register_lo(), displ, toc_reg); 792 } 793 } 794 break; 795 796 default: 797 ShouldNotReachHere(); 798 } 799 } 800 801 Address LIR_Assembler::as_Address(LIR_Address* addr) { 802 if (addr->base()->is_illegal()) { 803 Unimplemented(); 804 } 805 806 Register base = addr->base()->as_pointer_register(); 807 808 if (addr->index()->is_illegal()) { 809 return Address(base, addr->disp()); 810 } else if (addr->index()->is_cpu_register()) { 811 Register index = addr->index()->as_pointer_register(); 812 return Address(base, index, addr->disp()); 813 } else if (addr->index()->is_constant()) { 814 intptr_t addr_offset = addr->index()->as_constant_ptr()->as_jint() + addr->disp(); 815 return Address(base, addr_offset); 816 } else { 817 ShouldNotReachHere(); 818 return Address(); 819 } 820 } 821 822 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { 823 switch (type) { 824 case T_INT: 825 case T_FLOAT: { 826 Register tmp = Z_R1_scratch; 827 Address from = frame_map()->address_for_slot(src->single_stack_ix()); 828 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); 829 __ mem2reg_opt(tmp, from, false); 830 __ reg2mem_opt(tmp, to, false); 831 break; 832 } 833 case T_ADDRESS: 834 case T_OBJECT: { 835 Register tmp = Z_R1_scratch; 836 Address from = frame_map()->address_for_slot(src->single_stack_ix()); 837 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); 838 __ mem2reg_opt(tmp, from, true); 839 __ reg2mem_opt(tmp, to, true); 840 break; 841 } 842 case T_LONG: 843 case T_DOUBLE: { 844 Register tmp = Z_R1_scratch; 845 Address from = frame_map()->address_for_double_slot(src->double_stack_ix()); 846 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix()); 847 __ mem2reg_opt(tmp, from, true); 848 __ reg2mem_opt(tmp, to, true); 849 break; 850 } 851 852 default: 853 ShouldNotReachHere(); 854 } 855 } 856 857 // 4-byte accesses only! Don't use it to access 8 bytes! 858 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { 859 ShouldNotCallThis(); 860 return Address(); // unused 861 } 862 863 // 4-byte accesses only! Don't use it to access 8 bytes! 
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  ShouldNotCallThis();
  return Address(); // unused
}

void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code,
                            CodeEmitInfo* info, bool wide) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = Z_R0;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src, FILE_AND_LINE);
  }

  PatchingStub* patch = nullptr;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Immediate::is_simm20(disp_value)) {
      if (needs_patching) {
        __ load_const(Z_R1_scratch, (intptr_t)0);
      } else {
        __ load_const_optimized(Z_R1_scratch, disp_value);
      }
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
  } else {
    if (!Immediate::is_simm20(disp_value)) {
      __ load_const_optimized(Z_R1_scratch, disp_value);
      __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    } else {
      disp_reg = addr->index()->as_pointer_register();
    }
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");

  bool short_disp = Immediate::is_uimm12(disp_value);

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ z_lb(dest->as_register(),   disp_value, disp_reg, src); break;
    case T_CHAR  : __ z_llgh(dest->as_register(), disp_value, disp_reg, src); break;
    case T_SHORT :
      if (short_disp) {
        __ z_lh(dest->as_register(),  disp_value, disp_reg, src);
      } else {
        __ z_lhy(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_INT   :
      if (short_disp) {
        __ z_l(dest->as_register(),  disp_value, disp_reg, src);
      } else {
        __ z_ly(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_ADDRESS:
      __ z_lg(dest->as_register(), disp_value, disp_reg, src);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
    {
      if (UseCompressedOops && !wide) {
        __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
        __ oop_decoder(dest->as_register(), dest->as_register(), true);
      } else {
        __ z_lg(dest->as_register(), disp_value, disp_reg, src);
      }
      __ verify_oop(dest->as_register(), FILE_AND_LINE);
      break;
    }
    case T_FLOAT:
      if (short_disp) {
        __ z_le(dest->as_float_reg(),  disp_value, disp_reg, src);
      } else {
        __ z_ley(dest->as_float_reg(), disp_value, disp_reg, src);
      }
      break;
    case T_DOUBLE:
      if (short_disp) {
        __ z_ld(dest->as_double_reg(),  disp_value, disp_reg, src);
      } else {
        __ z_ldy(dest->as_double_reg(), disp_value, disp_reg, src);
      }
      break;
    case T_LONG  : __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break;
    default      : ShouldNotReachHere();
  }
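  // Addressing note (illustrative): the loads above all use the canonical
  // base + index + displacement form. Passing Z_R0 as disp_reg means "no index",
  // since general register 0 reads as zero in an address calculation, which is
  // why the setup above only switches to Z_R1_scratch when the displacement
  // does not fit into a signed 20-bit field.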
972 973 if (patch != nullptr) { 974 patching_epilog(patch, patch_code, src, info); 975 } 976 if (info != nullptr) add_debug_info_for_null_check(offset, info); 977 } 978 979 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 980 assert(src->is_stack(), "should not call otherwise"); 981 assert(dest->is_register(), "should not call otherwise"); 982 983 if (dest->is_single_cpu()) { 984 if (is_reference_type(type)) { 985 __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true); 986 __ verify_oop(dest->as_register(), FILE_AND_LINE); 987 } else if (type == T_METADATA || type == T_ADDRESS) { 988 __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true); 989 } else { 990 __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), false); 991 } 992 } else if (dest->is_double_cpu()) { 993 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix()); 994 __ mem2reg_opt(dest->as_register_lo(), src_addr_LO, true); 995 } else if (dest->is_single_fpu()) { 996 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); 997 __ mem2freg_opt(dest->as_float_reg(), src_addr, false); 998 } else if (dest->is_double_fpu()) { 999 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); 1000 __ mem2freg_opt(dest->as_double_reg(), src_addr, true); 1001 } else { 1002 ShouldNotReachHere(); 1003 } 1004 } 1005 1006 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { 1007 assert(src->is_register(), "should not call otherwise"); 1008 assert(dest->is_stack(), "should not call otherwise"); 1009 1010 if (src->is_single_cpu()) { 1011 const Address dst = frame_map()->address_for_slot(dest->single_stack_ix()); 1012 if (is_reference_type(type)) { 1013 __ verify_oop(src->as_register(), FILE_AND_LINE); 1014 __ reg2mem_opt(src->as_register(), dst, true); 1015 } else if (type == T_METADATA || type == T_ADDRESS) { 1016 __ reg2mem_opt(src->as_register(), dst, true); 1017 } else { 1018 __ reg2mem_opt(src->as_register(), dst, false); 1019 } 1020 } else if (src->is_double_cpu()) { 1021 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix()); 1022 __ reg2mem_opt(src->as_register_lo(), dstLO, true); 1023 } else if (src->is_single_fpu()) { 1024 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 1025 __ freg2mem_opt(src->as_float_reg(), dst_addr, false); 1026 } else if (src->is_double_fpu()) { 1027 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 1028 __ freg2mem_opt(src->as_double_reg(), dst_addr, true); 1029 } else { 1030 ShouldNotReachHere(); 1031 } 1032 } 1033 1034 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) { 1035 if (from_reg->is_float_kind() && to_reg->is_float_kind()) { 1036 if (from_reg->is_double_fpu()) { 1037 // double to double moves 1038 assert(to_reg->is_double_fpu(), "should match"); 1039 __ z_ldr(to_reg->as_double_reg(), from_reg->as_double_reg()); 1040 } else { 1041 // float to float moves 1042 assert(to_reg->is_single_fpu(), "should match"); 1043 __ z_ler(to_reg->as_float_reg(), from_reg->as_float_reg()); 1044 } 1045 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) { 1046 if (from_reg->is_double_cpu()) { 1047 __ z_lgr(to_reg->as_pointer_register(), from_reg->as_pointer_register()); 1048 } else if (to_reg->is_double_cpu()) { 1049 // int to int moves 1050 __ z_lgr(to_reg->as_register_lo(), 
from_reg->as_register()); 1051 } else { 1052 // int to int moves 1053 __ z_lgr(to_reg->as_register(), from_reg->as_register()); 1054 } 1055 } else { 1056 ShouldNotReachHere(); 1057 } 1058 if (is_reference_type(to_reg->type())) { 1059 __ verify_oop(to_reg->as_register(), FILE_AND_LINE); 1060 } 1061 } 1062 1063 void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type, 1064 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, 1065 bool wide) { 1066 assert(type != T_METADATA, "store of metadata ptr not supported"); 1067 LIR_Address* addr = dest_opr->as_address_ptr(); 1068 1069 Register dest = addr->base()->as_pointer_register(); 1070 Register disp_reg = Z_R0; 1071 int disp_value = addr->disp(); 1072 bool needs_patching = (patch_code != lir_patch_none); 1073 1074 if (addr->base()->is_oop_register()) { 1075 __ verify_oop(dest, FILE_AND_LINE); 1076 } 1077 1078 PatchingStub* patch = nullptr; 1079 if (needs_patching) { 1080 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1081 assert(!from->is_double_cpu() || 1082 patch_code == lir_patch_none || 1083 patch_code == lir_patch_normal, "patching doesn't match register"); 1084 } 1085 1086 assert(!needs_patching || (!Immediate::is_simm20(disp_value) && addr->index()->is_illegal()), "assumption"); 1087 if (addr->index()->is_illegal()) { 1088 if (!Immediate::is_simm20(disp_value)) { 1089 if (needs_patching) { 1090 __ load_const(Z_R1_scratch, (intptr_t)0); 1091 } else { 1092 __ load_const_optimized(Z_R1_scratch, disp_value); 1093 } 1094 disp_reg = Z_R1_scratch; 1095 disp_value = 0; 1096 } 1097 } else { 1098 if (!Immediate::is_simm20(disp_value)) { 1099 __ load_const_optimized(Z_R1_scratch, disp_value); 1100 __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register()); 1101 disp_reg = Z_R1_scratch; 1102 disp_value = 0; 1103 } 1104 disp_reg = addr->index()->as_pointer_register(); 1105 } 1106 1107 assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up"); 1108 1109 if (is_reference_type(type)) { 1110 __ verify_oop(from->as_register(), FILE_AND_LINE); 1111 } 1112 1113 bool short_disp = Immediate::is_uimm12(disp_value); 1114 1115 // Remember the offset of the store. The patching_epilog must be done 1116 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get 1117 // entered in increasing order. 1118 int offset = code_offset(); 1119 switch (type) { 1120 case T_BOOLEAN: // fall through 1121 case T_BYTE : 1122 if (short_disp) { 1123 __ z_stc(from->as_register(), disp_value, disp_reg, dest); 1124 } else { 1125 __ z_stcy(from->as_register(), disp_value, disp_reg, dest); 1126 } 1127 break; 1128 case T_CHAR : // fall through 1129 case T_SHORT : 1130 if (short_disp) { 1131 __ z_sth(from->as_register(), disp_value, disp_reg, dest); 1132 } else { 1133 __ z_sthy(from->as_register(), disp_value, disp_reg, dest); 1134 } 1135 break; 1136 case T_INT : 1137 if (short_disp) { 1138 __ z_st(from->as_register(), disp_value, disp_reg, dest); 1139 } else { 1140 __ z_sty(from->as_register(), disp_value, disp_reg, dest); 1141 } 1142 break; 1143 case T_LONG : __ z_stg(from->as_register_lo(), disp_value, disp_reg, dest); break; 1144 case T_ADDRESS: __ z_stg(from->as_register(), disp_value, disp_reg, dest); break; 1145 break; 1146 case T_ARRAY : // fall through 1147 case T_OBJECT: 1148 { 1149 if (UseCompressedOops && !wide) { 1150 Register compressed_src = Z_R14; 1151 __ oop_encoder(compressed_src, from->as_register(), true, (disp_reg != Z_R1) ? 
Z_R1 : Z_R0, -1, true); 1152 offset = code_offset(); 1153 if (short_disp) { 1154 __ z_st(compressed_src, disp_value, disp_reg, dest); 1155 } else { 1156 __ z_sty(compressed_src, disp_value, disp_reg, dest); 1157 } 1158 } else { 1159 __ z_stg(from->as_register(), disp_value, disp_reg, dest); 1160 } 1161 break; 1162 } 1163 case T_FLOAT : 1164 if (short_disp) { 1165 __ z_ste(from->as_float_reg(), disp_value, disp_reg, dest); 1166 } else { 1167 __ z_stey(from->as_float_reg(), disp_value, disp_reg, dest); 1168 } 1169 break; 1170 case T_DOUBLE: 1171 if (short_disp) { 1172 __ z_std(from->as_double_reg(), disp_value, disp_reg, dest); 1173 } else { 1174 __ z_stdy(from->as_double_reg(), disp_value, disp_reg, dest); 1175 } 1176 break; 1177 default: ShouldNotReachHere(); 1178 } 1179 1180 if (patch != nullptr) { 1181 patching_epilog(patch, patch_code, dest, info); 1182 } 1183 1184 if (info != nullptr) add_debug_info_for_null_check(offset, info); 1185 } 1186 1187 1188 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) { 1189 assert(result->is_illegal() || 1190 (result->is_single_cpu() && result->as_register() == Z_R2) || 1191 (result->is_double_cpu() && result->as_register_lo() == Z_R2) || 1192 (result->is_single_fpu() && result->as_float_reg() == Z_F0) || 1193 (result->is_double_fpu() && result->as_double_reg() == Z_F0), "convention"); 1194 1195 __ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::polling_page_offset())); 1196 1197 // Pop the frame before the safepoint code. 1198 __ pop_frame_restore_retPC(initial_frame_size_in_bytes()); 1199 1200 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { 1201 __ reserved_stack_check(Z_R14); 1202 } 1203 1204 // We need to mark the code position where the load from the safepoint 1205 // polling page was emitted as relocInfo::poll_return_type here. 1206 __ relocate(relocInfo::poll_return_type); 1207 __ load_from_polling_page(Z_R1_scratch); 1208 1209 __ z_br(Z_R14); // Return to caller. 1210 } 1211 1212 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 1213 const Register poll_addr = tmp->as_register_lo(); 1214 __ z_lg(poll_addr, Address(Z_thread, JavaThread::polling_page_offset())); 1215 guarantee(info != nullptr, "Shouldn't be null"); 1216 add_debug_info_for_branch(info); 1217 int offset = __ offset(); 1218 __ relocate(relocInfo::poll_type); 1219 __ load_from_polling_page(poll_addr); 1220 return offset; 1221 } 1222 1223 void LIR_Assembler::emit_static_call_stub() { 1224 1225 // Stub is fixed up when the corresponding call is converted from calling 1226 // compiled code to calling interpreted code. 1227 1228 address call_pc = __ pc(); 1229 address stub = __ start_a_stub(call_stub_size()); 1230 if (stub == nullptr) { 1231 bailout("static call stub overflow"); 1232 return; 1233 } 1234 1235 int start = __ offset(); 1236 1237 __ relocate(static_stub_Relocation::spec(call_pc)); 1238 1239 // See also Matcher::interpreter_method_reg(). 1240 AddressLiteral meta = __ allocate_metadata_address(nullptr); 1241 bool success = __ load_const_from_toc(Z_method, meta); 1242 1243 __ set_inst_mark(); 1244 AddressLiteral a((address)-1); 1245 success = success && __ load_const_from_toc(Z_R1, a); 1246 if (!success) { 1247 bailout("const section overflow"); 1248 return; 1249 } 1250 1251 __ z_br(Z_R1); 1252 assert(__ offset() - start <= call_stub_size(), "stub too big"); 1253 __ end_a_stub(); // Update current stubs pointer and restore insts_end. 
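  // Usage sketch (assuming the usual HotSpot call resolution flow): when the
  // static call is (re)resolved to interpreted code, the stub's metadata
  // literal is patched with the callee's Method* (loaded into Z_method above)
  // and the branch target is patched, typically to the c2i adapter, so the
  // caller's BRASL itself never has to change shape.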
1254 } 1255 1256 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 1257 bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual; 1258 if (opr1->is_single_cpu()) { 1259 Register reg1 = opr1->as_register(); 1260 if (opr2->is_single_cpu()) { 1261 // cpu register - cpu register 1262 if (is_reference_type(opr1->type())) { 1263 __ z_clgr(reg1, opr2->as_register()); 1264 } else { 1265 assert(!is_reference_type(opr2->type()), "cmp int, oop?"); 1266 if (unsigned_comp) { 1267 __ z_clr(reg1, opr2->as_register()); 1268 } else { 1269 __ z_cr(reg1, opr2->as_register()); 1270 } 1271 } 1272 } else if (opr2->is_stack()) { 1273 // cpu register - stack 1274 if (is_reference_type(opr1->type())) { 1275 __ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 1276 } else { 1277 if (unsigned_comp) { 1278 __ z_cly(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 1279 } else { 1280 __ z_cy(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 1281 } 1282 } 1283 } else if (opr2->is_constant()) { 1284 // cpu register - constant 1285 LIR_Const* c = opr2->as_constant_ptr(); 1286 if (c->type() == T_INT) { 1287 if (unsigned_comp) { 1288 __ z_clfi(reg1, c->as_jint()); 1289 } else { 1290 __ z_cfi(reg1, c->as_jint()); 1291 } 1292 } else if (c->type() == T_METADATA) { 1293 // We only need, for now, comparison with null for metadata. 1294 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops"); 1295 Metadata* m = c->as_metadata(); 1296 if (m == nullptr) { 1297 __ z_cghi(reg1, 0); 1298 } else { 1299 ShouldNotReachHere(); 1300 } 1301 } else if (is_reference_type(c->type())) { 1302 // In 64bit oops are single register. 1303 jobject o = c->as_jobject(); 1304 if (o == nullptr) { 1305 __ z_ltgr(reg1, reg1); 1306 } else { 1307 jobject2reg(o, Z_R1_scratch); 1308 __ z_cgr(reg1, Z_R1_scratch); 1309 } 1310 } else { 1311 fatal("unexpected type: %s", basictype_to_str(c->type())); 1312 } 1313 // cpu register - address 1314 } else if (opr2->is_address()) { 1315 if (op->info() != nullptr) { 1316 add_debug_info_for_null_check_here(op->info()); 1317 } 1318 if (unsigned_comp) { 1319 __ z_cly(reg1, as_Address(opr2->as_address_ptr())); 1320 } else { 1321 __ z_cy(reg1, as_Address(opr2->as_address_ptr())); 1322 } 1323 } else { 1324 ShouldNotReachHere(); 1325 } 1326 1327 } else if (opr1->is_double_cpu()) { 1328 assert(!unsigned_comp, "unexpected"); 1329 Register xlo = opr1->as_register_lo(); 1330 Register xhi = opr1->as_register_hi(); 1331 if (opr2->is_double_cpu()) { 1332 __ z_cgr(xlo, opr2->as_register_lo()); 1333 } else if (opr2->is_constant()) { 1334 // cpu register - constant 0 1335 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); 1336 __ z_ltgr(xlo, xlo); 1337 } else { 1338 ShouldNotReachHere(); 1339 } 1340 1341 } else if (opr1->is_single_fpu()) { 1342 if (opr2->is_single_fpu()) { 1343 __ z_cebr(opr1->as_float_reg(), opr2->as_float_reg()); 1344 } else { 1345 // stack slot 1346 Address addr = frame_map()->address_for_slot(opr2->single_stack_ix()); 1347 if (Immediate::is_uimm12(addr.disp())) { 1348 __ z_ceb(opr1->as_float_reg(), addr); 1349 } else { 1350 __ z_ley(Z_fscratch_1, addr); 1351 __ z_cebr(opr1->as_float_reg(), Z_fscratch_1); 1352 } 1353 } 1354 } else if (opr1->is_double_fpu()) { 1355 if (opr2->is_double_fpu()) { 1356 __ z_cdbr(opr1->as_double_reg(), opr2->as_double_reg()); 1357 } else { 1358 // stack slot 1359 Address addr = frame_map()->address_for_slot(opr2->double_stack_ix()); 1360 if 
(Immediate::is_uimm12(addr.disp())) { 1361 __ z_cdb(opr1->as_double_reg(), addr); 1362 } else { 1363 __ z_ldy(Z_fscratch_1, addr); 1364 __ z_cdbr(opr1->as_double_reg(), Z_fscratch_1); 1365 } 1366 } 1367 } else { 1368 ShouldNotReachHere(); 1369 } 1370 } 1371 1372 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { 1373 Label done; 1374 Register dreg = dst->as_register(); 1375 1376 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 1377 assert((left->is_single_fpu() && right->is_single_fpu()) || 1378 (left->is_double_fpu() && right->is_double_fpu()), "unexpected operand types"); 1379 bool is_single = left->is_single_fpu(); 1380 bool is_unordered_less = (code == lir_ucmp_fd2i); 1381 FloatRegister lreg = is_single ? left->as_float_reg() : left->as_double_reg(); 1382 FloatRegister rreg = is_single ? right->as_float_reg() : right->as_double_reg(); 1383 if (is_single) { 1384 __ z_cebr(lreg, rreg); 1385 } else { 1386 __ z_cdbr(lreg, rreg); 1387 } 1388 if (VM_Version::has_LoadStoreConditional()) { 1389 Register one = Z_R0_scratch; 1390 Register minus_one = Z_R1_scratch; 1391 __ z_lghi(minus_one, -1); 1392 __ z_lghi(one, 1); 1393 __ z_lghi(dreg, 0); 1394 __ z_locgr(dreg, one, is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered); 1395 __ z_locgr(dreg, minus_one, is_unordered_less ? Assembler::bcondLowOrNotOrdered : Assembler::bcondLow); 1396 } else { 1397 __ clear_reg(dreg, true, false); 1398 __ z_bre(done); // if (left == right) dst = 0 1399 1400 // if (left > right || ((code ~= cmpg) && (left <> right)) dst := 1 1401 __ z_lhi(dreg, 1); 1402 __ z_brc(is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered, done); 1403 1404 // if (left < right || ((code ~= cmpl) && (left <> right)) dst := -1 1405 __ z_lhi(dreg, -1); 1406 } 1407 } else { 1408 assert(code == lir_cmp_l2i, "check"); 1409 if (VM_Version::has_LoadStoreConditional()) { 1410 Register one = Z_R0_scratch; 1411 Register minus_one = Z_R1_scratch; 1412 __ z_cgr(left->as_register_lo(), right->as_register_lo()); 1413 __ z_lghi(minus_one, -1); 1414 __ z_lghi(one, 1); 1415 __ z_lghi(dreg, 0); 1416 __ z_locgr(dreg, one, Assembler::bcondHigh); 1417 __ z_locgr(dreg, minus_one, Assembler::bcondLow); 1418 } else { 1419 __ z_cgr(left->as_register_lo(), right->as_register_lo()); 1420 __ z_lghi(dreg, 0); // eq value 1421 __ z_bre(done); 1422 __ z_lghi(dreg, 1); // gt value 1423 __ z_brh(done); 1424 __ z_lghi(dreg, -1); // lt value 1425 } 1426 } 1427 __ bind(done); 1428 } 1429 1430 // result = condition ? 
opr1 : opr2 1431 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type, 1432 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) { 1433 assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on s390"); 1434 1435 Assembler::branch_condition acond = Assembler::bcondEqual, ncond = Assembler::bcondNotEqual; 1436 switch (condition) { 1437 case lir_cond_equal: acond = Assembler::bcondEqual; ncond = Assembler::bcondNotEqual; break; 1438 case lir_cond_notEqual: acond = Assembler::bcondNotEqual; ncond = Assembler::bcondEqual; break; 1439 case lir_cond_less: acond = Assembler::bcondLow; ncond = Assembler::bcondNotLow; break; 1440 case lir_cond_lessEqual: acond = Assembler::bcondNotHigh; ncond = Assembler::bcondHigh; break; 1441 case lir_cond_greaterEqual: acond = Assembler::bcondNotLow; ncond = Assembler::bcondLow; break; 1442 case lir_cond_greater: acond = Assembler::bcondHigh; ncond = Assembler::bcondNotHigh; break; 1443 case lir_cond_belowEqual: acond = Assembler::bcondNotHigh; ncond = Assembler::bcondHigh; break; 1444 case lir_cond_aboveEqual: acond = Assembler::bcondNotLow; ncond = Assembler::bcondLow; break; 1445 default: ShouldNotReachHere(); 1446 } 1447 1448 if (opr1->is_cpu_register()) { 1449 reg2reg(opr1, result); 1450 } else if (opr1->is_stack()) { 1451 stack2reg(opr1, result, result->type()); 1452 } else if (opr1->is_constant()) { 1453 const2reg(opr1, result, lir_patch_none, nullptr); 1454 } else { 1455 ShouldNotReachHere(); 1456 } 1457 1458 if (VM_Version::has_LoadStoreConditional() && !opr2->is_constant()) { 1459 // Optimized version that does not require a branch. 1460 if (opr2->is_single_cpu()) { 1461 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); 1462 __ z_locgr(result->as_register(), opr2->as_register(), ncond); 1463 } else if (opr2->is_double_cpu()) { 1464 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 1465 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 1466 __ z_locgr(result->as_register_lo(), opr2->as_register_lo(), ncond); 1467 } else if (opr2->is_single_stack()) { 1468 __ z_loc(result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()), ncond); 1469 } else if (opr2->is_double_stack()) { 1470 __ z_locg(result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix()), ncond); 1471 } else { 1472 ShouldNotReachHere(); 1473 } 1474 } else { 1475 Label skip; 1476 __ z_brc(acond, skip); 1477 if (opr2->is_cpu_register()) { 1478 reg2reg(opr2, result); 1479 } else if (opr2->is_stack()) { 1480 stack2reg(opr2, result, result->type()); 1481 } else if (opr2->is_constant()) { 1482 const2reg(opr2, result, lir_patch_none, nullptr); 1483 } else { 1484 ShouldNotReachHere(); 1485 } 1486 __ bind(skip); 1487 } 1488 } 1489 1490 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, 1491 CodeEmitInfo* info, bool pop_fpu_stack) { 1492 assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 1493 1494 if (left->is_single_cpu()) { 1495 assert(left == dest, "left and dest must be equal"); 1496 Register lreg = left->as_register(); 1497 1498 if (right->is_single_cpu()) { 1499 // cpu register - cpu register 1500 Register rreg = right->as_register(); 1501 switch (code) 
{ 1502 case lir_add: __ z_ar (lreg, rreg); break; 1503 case lir_sub: __ z_sr (lreg, rreg); break; 1504 case lir_mul: __ z_msr(lreg, rreg); break; 1505 default: ShouldNotReachHere(); 1506 } 1507 1508 } else if (right->is_stack()) { 1509 // cpu register - stack 1510 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1511 switch (code) { 1512 case lir_add: __ z_ay(lreg, raddr); break; 1513 case lir_sub: __ z_sy(lreg, raddr); break; 1514 default: ShouldNotReachHere(); 1515 } 1516 1517 } else if (right->is_constant()) { 1518 // cpu register - constant 1519 jint c = right->as_constant_ptr()->as_jint(); 1520 switch (code) { 1521 case lir_add: __ z_agfi(lreg, c); break; 1522 case lir_sub: __ z_agfi(lreg, -c); break; // note: -min_jint == min_jint 1523 case lir_mul: __ z_msfi(lreg, c); break; 1524 default: ShouldNotReachHere(); 1525 } 1526 1527 } else { 1528 ShouldNotReachHere(); 1529 } 1530 1531 } else if (left->is_double_cpu()) { 1532 assert(left == dest, "left and dest must be equal"); 1533 Register lreg_lo = left->as_register_lo(); 1534 Register lreg_hi = left->as_register_hi(); 1535 1536 if (right->is_double_cpu()) { 1537 // cpu register - cpu register 1538 Register rreg_lo = right->as_register_lo(); 1539 Register rreg_hi = right->as_register_hi(); 1540 assert_different_registers(lreg_lo, rreg_lo); 1541 switch (code) { 1542 case lir_add: 1543 __ z_agr(lreg_lo, rreg_lo); 1544 break; 1545 case lir_sub: 1546 __ z_sgr(lreg_lo, rreg_lo); 1547 break; 1548 case lir_mul: 1549 __ z_msgr(lreg_lo, rreg_lo); 1550 break; 1551 default: 1552 ShouldNotReachHere(); 1553 } 1554 1555 } else if (right->is_constant()) { 1556 // cpu register - constant 1557 jlong c = right->as_constant_ptr()->as_jlong_bits(); 1558 switch (code) { 1559 case lir_add: __ z_agfi(lreg_lo, c); break; 1560 case lir_sub: 1561 if (c != min_jint) { 1562 __ z_agfi(lreg_lo, -c); 1563 } else { 1564 // -min_jint cannot be represented as simm32 in z_agfi 1565 // min_jint sign extended: 0xffffffff80000000 1566 // -min_jint as 64 bit integer: 0x0000000080000000 1567 // 0x80000000 can be represented as uimm32 in z_algfi 1568 // lreg_lo := lreg_lo + -min_jint == lreg_lo + 0x80000000 1569 __ z_algfi(lreg_lo, UCONST64(0x80000000)); 1570 } 1571 break; 1572 case lir_mul: __ z_msgfi(lreg_lo, c); break; 1573 default: 1574 ShouldNotReachHere(); 1575 } 1576 1577 } else { 1578 ShouldNotReachHere(); 1579 } 1580 1581 } else if (left->is_single_fpu()) { 1582 assert(left == dest, "left and dest must be equal"); 1583 FloatRegister lreg = left->as_float_reg(); 1584 FloatRegister rreg = right->is_single_fpu() ? 
right->as_float_reg() : fnoreg; 1585 Address raddr; 1586 1587 if (rreg == fnoreg) { 1588 assert(right->is_single_stack(), "constants should be loaded into register"); 1589 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1590 if (!Immediate::is_uimm12(raddr.disp())) { 1591 __ mem2freg_opt(rreg = Z_fscratch_1, raddr, false); 1592 } 1593 } 1594 1595 if (rreg != fnoreg) { 1596 switch (code) { 1597 case lir_add: __ z_aebr(lreg, rreg); break; 1598 case lir_sub: __ z_sebr(lreg, rreg); break; 1599 case lir_mul: __ z_meebr(lreg, rreg); break; 1600 case lir_div: __ z_debr(lreg, rreg); break; 1601 default: ShouldNotReachHere(); 1602 } 1603 } else { 1604 switch (code) { 1605 case lir_add: __ z_aeb(lreg, raddr); break; 1606 case lir_sub: __ z_seb(lreg, raddr); break; 1607 case lir_mul: __ z_meeb(lreg, raddr); break; 1608 case lir_div: __ z_deb(lreg, raddr); break; 1609 default: ShouldNotReachHere(); 1610 } 1611 } 1612 } else if (left->is_double_fpu()) { 1613 assert(left == dest, "left and dest must be equal"); 1614 FloatRegister lreg = left->as_double_reg(); 1615 FloatRegister rreg = right->is_double_fpu() ? right->as_double_reg() : fnoreg; 1616 Address raddr; 1617 1618 if (rreg == fnoreg) { 1619 assert(right->is_double_stack(), "constants should be loaded into register"); 1620 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 1621 if (!Immediate::is_uimm12(raddr.disp())) { 1622 __ mem2freg_opt(rreg = Z_fscratch_1, raddr, true); 1623 } 1624 } 1625 1626 if (rreg != fnoreg) { 1627 switch (code) { 1628 case lir_add: __ z_adbr(lreg, rreg); break; 1629 case lir_sub: __ z_sdbr(lreg, rreg); break; 1630 case lir_mul: __ z_mdbr(lreg, rreg); break; 1631 case lir_div: __ z_ddbr(lreg, rreg); break; 1632 default: ShouldNotReachHere(); 1633 } 1634 } else { 1635 switch (code) { 1636 case lir_add: __ z_adb(lreg, raddr); break; 1637 case lir_sub: __ z_sdb(lreg, raddr); break; 1638 case lir_mul: __ z_mdb(lreg, raddr); break; 1639 case lir_div: __ z_ddb(lreg, raddr); break; 1640 default: ShouldNotReachHere(); 1641 } 1642 } 1643 } else if (left->is_address()) { 1644 assert(left == dest, "left and dest must be equal"); 1645 assert(code == lir_add, "unsupported operation"); 1646 assert(right->is_constant(), "unsupported operand"); 1647 jint c = right->as_constant_ptr()->as_jint(); 1648 LIR_Address* lir_addr = left->as_address_ptr(); 1649 Address addr = as_Address(lir_addr); 1650 switch (lir_addr->type()) { 1651 case T_INT: 1652 __ add2mem_32(addr, c, Z_R1_scratch); 1653 break; 1654 case T_LONG: 1655 __ add2mem_64(addr, c, Z_R1_scratch); 1656 break; 1657 default: 1658 ShouldNotReachHere(); 1659 } 1660 } else { 1661 ShouldNotReachHere(); 1662 } 1663 } 1664 1665 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1666 switch (code) { 1667 case lir_sqrt: { 1668 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt"); 1669 FloatRegister src_reg = value->as_double_reg(); 1670 FloatRegister dst_reg = dest->as_double_reg(); 1671 __ z_sqdbr(dst_reg, src_reg); 1672 break; 1673 } 1674 case lir_abs: { 1675 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs"); 1676 FloatRegister src_reg = value->as_double_reg(); 1677 FloatRegister dst_reg = dest->as_double_reg(); 1678 __ z_lpdbr(dst_reg, src_reg); 1679 break; 1680 } 1681 default: { 1682 ShouldNotReachHere(); 1683 break; 1684 } 1685 } 1686 } 1687 1688 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 1689 if 
(left->is_single_cpu()) { 1690 Register reg = left->as_register(); 1691 if (right->is_constant()) { 1692 int val = right->as_constant_ptr()->as_jint(); 1693 switch (code) { 1694 case lir_logic_and: __ z_nilf(reg, val); break; 1695 case lir_logic_or: __ z_oilf(reg, val); break; 1696 case lir_logic_xor: __ z_xilf(reg, val); break; 1697 default: ShouldNotReachHere(); 1698 } 1699 } else if (right->is_stack()) { 1700 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1701 switch (code) { 1702 case lir_logic_and: __ z_ny(reg, raddr); break; 1703 case lir_logic_or: __ z_oy(reg, raddr); break; 1704 case lir_logic_xor: __ z_xy(reg, raddr); break; 1705 default: ShouldNotReachHere(); 1706 } 1707 } else { 1708 Register rright = right->as_register(); 1709 switch (code) { 1710 case lir_logic_and: __ z_nr(reg, rright); break; 1711 case lir_logic_or : __ z_or(reg, rright); break; 1712 case lir_logic_xor: __ z_xr(reg, rright); break; 1713 default: ShouldNotReachHere(); 1714 } 1715 } 1716 move_regs(reg, dst->as_register()); 1717 } else { 1718 Register l_lo = left->as_register_lo(); 1719 if (right->is_constant()) { 1720 __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong()); 1721 switch (code) { 1722 case lir_logic_and: 1723 __ z_ngr(l_lo, Z_R1_scratch); 1724 break; 1725 case lir_logic_or: 1726 __ z_ogr(l_lo, Z_R1_scratch); 1727 break; 1728 case lir_logic_xor: 1729 __ z_xgr(l_lo, Z_R1_scratch); 1730 break; 1731 default: ShouldNotReachHere(); 1732 } 1733 } else { 1734 Register r_lo; 1735 if (is_reference_type(right->type())) { 1736 r_lo = right->as_register(); 1737 } else { 1738 r_lo = right->as_register_lo(); 1739 } 1740 switch (code) { 1741 case lir_logic_and: 1742 __ z_ngr(l_lo, r_lo); 1743 break; 1744 case lir_logic_or: 1745 __ z_ogr(l_lo, r_lo); 1746 break; 1747 case lir_logic_xor: 1748 __ z_xgr(l_lo, r_lo); 1749 break; 1750 default: ShouldNotReachHere(); 1751 } 1752 } 1753 1754 Register dst_lo = dst->as_register_lo(); 1755 1756 move_regs(l_lo, dst_lo); 1757 } 1758 } 1759 1760 // See operand selection in LIRGenerator::do_ArithmeticOp_Int(). 1761 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { 1762 if (left->is_double_cpu()) { 1763 // 64 bit integer case 1764 assert(left->is_double_cpu(), "left must be register"); 1765 assert(right->is_double_cpu() || is_power_of_2(right->as_jlong()), 1766 "right must be register or power of 2 constant"); 1767 assert(result->is_double_cpu(), "result must be register"); 1768 1769 Register lreg = left->as_register_lo(); 1770 Register dreg = result->as_register_lo(); 1771 1772 if (right->is_constant()) { 1773 // Convert division by a power of two into some shifts and logical operations. 1774 Register treg1 = Z_R0_scratch; 1775 Register treg2 = Z_R1_scratch; 1776 jlong divisor = right->as_jlong(); 1777 jlong log_divisor = log2i_exact(right->as_jlong()); 1778 1779 if (divisor == min_jlong) { 1780 // Min_jlong is special. Result is '0' except for min_jlong/min_jlong = 1. 1781 if (dreg == lreg) { 1782 NearLabel done; 1783 __ load_const_optimized(treg2, min_jlong); 1784 __ z_cgr(lreg, treg2); 1785 __ z_lghi(dreg, 0); // Preserves condition code. 
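        // Note: dreg already holds 0 here and the condition code from the compare with
        // min_jlong above is still valid (z_lghi does not change it). If the dividend is
        // not min_jlong, the branch below skips the special case, leaving the result 0;
        // otherwise we fall through and load 1 (min_jlong / min_jlong).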
1786 __ z_brne(done); 1787 __ z_lghi(dreg, 1); // min_jlong / min_jlong = 1 1788 __ bind(done); 1789 } else { 1790 assert_different_registers(dreg, lreg); 1791 NearLabel done; 1792 __ z_lghi(dreg, 0); 1793 __ compare64_and_branch(lreg, min_jlong, Assembler::bcondNotEqual, done); 1794 __ z_lghi(dreg, 1); 1795 __ bind(done); 1796 } 1797 return; 1798 } 1799 __ move_reg_if_needed(dreg, T_LONG, lreg, T_LONG); 1800 if (divisor == 2) { 1801 __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0 1802 } else { 1803 __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0 1804 __ and_imm(treg2, divisor - 1, treg1, true); 1805 } 1806 if (code == lir_idiv) { 1807 __ z_agr(dreg, treg2); 1808 __ z_srag(dreg, dreg, log_divisor); 1809 } else { 1810 assert(code == lir_irem, "check"); 1811 __ z_agr(treg2, dreg); 1812 __ and_imm(treg2, ~(divisor - 1), treg1, true); 1813 __ z_sgr(dreg, treg2); 1814 } 1815 return; 1816 } 1817 1818 // Divisor is not a power of 2 constant. 1819 Register rreg = right->as_register_lo(); 1820 Register treg = temp->as_register_lo(); 1821 assert(right->is_double_cpu(), "right must be register"); 1822 assert(lreg == Z_R11, "see ldivInOpr()"); 1823 assert(rreg != lreg, "right register must not be same as left register"); 1824 assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) || 1825 (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see ldivInOpr(), ldivOutOpr(), lremOutOpr()"); 1826 1827 Register R1 = lreg->predecessor(); 1828 Register R2 = rreg; 1829 assert(code != lir_idiv || lreg==dreg, "see code below"); 1830 if (code == lir_idiv) { 1831 __ z_lcgr(lreg, lreg); 1832 } else { 1833 __ clear_reg(dreg, true, false); 1834 } 1835 NearLabel done; 1836 __ compare64_and_branch(R2, -1, Assembler::bcondEqual, done); 1837 if (code == lir_idiv) { 1838 __ z_lcgr(lreg, lreg); // Revert lcgr above. 1839 } 1840 if (ImplicitDiv0Checks) { 1841 // No debug info because the idiv won't trap. 1842 // Add_debug_info_for_div0 would instantiate another DivByZeroStub, 1843 // which is unnecessary, too. 1844 add_debug_info_for_div0(__ offset(), info); 1845 } 1846 __ z_dsgr(R1, R2); 1847 __ bind(done); 1848 return; 1849 } 1850 1851 // 32 bit integer case 1852 1853 assert(left->is_single_cpu(), "left must be register"); 1854 assert(right->is_single_cpu() || is_power_of_2(right->as_jint()), "right must be register or power of 2 constant"); 1855 assert(result->is_single_cpu(), "result must be register"); 1856 1857 Register lreg = left->as_register(); 1858 Register dreg = result->as_register(); 1859 1860 if (right->is_constant()) { 1861 // Convert division by a power of two into some shifts and logical operations. 1862 Register treg1 = Z_R0_scratch; 1863 Register treg2 = Z_R1_scratch; 1864 jlong divisor = right->as_jint(); 1865 jlong log_divisor = log2i_exact(right->as_jint()); 1866 __ move_reg_if_needed(dreg, T_LONG, lreg, T_INT); // sign extend 1867 if (divisor == 2) { 1868 __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0 1869 } else { 1870 __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0 1871 __ and_imm(treg2, divisor - 1, treg1, true); 1872 } 1873 if (code == lir_idiv) { 1874 __ z_agr(dreg, treg2); 1875 __ z_srag(dreg, dreg, log_divisor); 1876 } else { 1877 assert(code == lir_irem, "check"); 1878 __ z_agr(treg2, dreg); 1879 __ and_imm(treg2, ~(divisor - 1), treg1, true); 1880 __ z_sgr(dreg, treg2); 1881 } 1882 return; 1883 } 1884 1885 // Divisor is not a power of 2 constant. 
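  // Explanatory note: the general case below uses z_dsgfr (DIVIDE SINGLE), which operates
  // on an even/odd register pair. The sign-extended dividend is placed in the odd register
  // (Z_R11); after the divide, the remainder is in the even register (Z_R10) and the
  // quotient is in the odd register (Z_R11). The asserts below therefore pin the operands
  // to Z_R10/Z_R11 as chosen by divInOpr(), divOutOpr() and remOutOpr().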
  Register rreg = right->as_register();
  Register treg = temp->as_register();
  assert(right->is_single_cpu(), "right must be register");
  assert(lreg == Z_R11, "see divInOpr()");
  assert(rreg != lreg, "right register must not be same as left register");
  assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10)
         || (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see divInOpr(), divOutOpr(), remOutOpr()");

  Register R1 = lreg->predecessor();
  Register R2 = rreg;
  __ move_reg_if_needed(lreg, T_LONG, lreg, T_INT); // sign extend
  if (ImplicitDiv0Checks) {
    // No debug info because the idiv won't trap.
    // Add_debug_info_for_div0 would instantiate another DivByZeroStub,
    // which is unnecessary, too.
    add_debug_info_for_div0(__ offset(), info);
  }
  __ z_dsgfr(R1, R2);
}

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");
  assert(exceptionPC->as_register() == Z_EXC_PC, "should match");

  // Exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers).
  info->add_register_oop(exceptionOop);

  // Reuse the debug info from the safepoint poll for the throw op itself.
  __ get_PC(Z_EXC_PC);
  add_call_info(__ offset(), info); // for exception handler
  address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? C1StubId::handle_exception_id
                                                                    : C1StubId::handle_exception_nofpu_id);
  emit_call_c(stub);
}

void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");

  __ branch_optimized(Assembler::bcondAlways, _unwind_handler_entry);
}

void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // If we don't know anything, just go through the generic arraycopy.
  if (default_type == nullptr) {
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    if (copyfunc_addr == nullptr) {
      // Take a slow path for generic arraycopy.
      __ branch_optimized(Assembler::bcondAlways, *stub->entry());
      __ bind(*stub->continuation());
      return;
    }

    // Save outgoing arguments in callee saved registers (C convention) in case
    // a call to System.arraycopy is needed.
    Register callee_saved_src = Z_R10;
    Register callee_saved_src_pos = Z_R11;
    Register callee_saved_dst = Z_R12;
    Register callee_saved_dst_pos = Z_R13;
    Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.
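    // Explanatory note: these copies are restored after the call. If the generic stub
    // copies only part of the array, the code below recovers the number of elements
    // already copied as ~Z_RET, advances src_pos/dst_pos and shrinks length by that
    // amount, and then falls back to the slow-path stub.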
1960 1961 __ lgr_if_needed(callee_saved_src, src); 1962 __ lgr_if_needed(callee_saved_src_pos, src_pos); 1963 __ lgr_if_needed(callee_saved_dst, dst); 1964 __ lgr_if_needed(callee_saved_dst_pos, dst_pos); 1965 __ lgr_if_needed(callee_saved_length, length); 1966 1967 // C function requires 64 bit values. 1968 __ z_lgfr(src_pos, src_pos); 1969 __ z_lgfr(dst_pos, dst_pos); 1970 __ z_lgfr(length, length); 1971 1972 // Pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint. 1973 1974 // The arguments are in the corresponding registers. 1975 assert(Z_ARG1 == src, "assumption"); 1976 assert(Z_ARG2 == src_pos, "assumption"); 1977 assert(Z_ARG3 == dst, "assumption"); 1978 assert(Z_ARG4 == dst_pos, "assumption"); 1979 assert(Z_ARG5 == length, "assumption"); 1980 #ifndef PRODUCT 1981 if (PrintC1Statistics) { 1982 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt); 1983 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 1984 } 1985 #endif 1986 emit_call_c(copyfunc_addr); 1987 CHECK_BAILOUT(); 1988 1989 __ compare32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation()); 1990 1991 __ z_lgr(tmp, Z_RET); 1992 __ z_xilf(tmp, -1); 1993 1994 // Restore values from callee saved registers so they are where the stub 1995 // expects them. 1996 __ lgr_if_needed(src, callee_saved_src); 1997 __ lgr_if_needed(src_pos, callee_saved_src_pos); 1998 __ lgr_if_needed(dst, callee_saved_dst); 1999 __ lgr_if_needed(dst_pos, callee_saved_dst_pos); 2000 __ lgr_if_needed(length, callee_saved_length); 2001 2002 __ z_sr(length, tmp); 2003 __ z_ar(src_pos, tmp); 2004 __ z_ar(dst_pos, tmp); 2005 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 2006 2007 __ bind(*stub->continuation()); 2008 return; 2009 } 2010 2011 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 2012 2013 int elem_size = type2aelembytes(basic_type); 2014 int shift_amount; 2015 2016 switch (elem_size) { 2017 case 1 : 2018 shift_amount = 0; 2019 break; 2020 case 2 : 2021 shift_amount = 1; 2022 break; 2023 case 4 : 2024 shift_amount = 2; 2025 break; 2026 case 8 : 2027 shift_amount = 3; 2028 break; 2029 default: 2030 shift_amount = -1; 2031 ShouldNotReachHere(); 2032 } 2033 2034 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 2035 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 2036 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); 2037 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); 2038 2039 // Length and pos's are all sign extended at this point on 64bit. 2040 2041 // test for null 2042 if (flags & LIR_OpArrayCopy::src_null_check) { 2043 __ compareU64_and_branch(src, (intptr_t)0, Assembler::bcondZero, *stub->entry()); 2044 } 2045 if (flags & LIR_OpArrayCopy::dst_null_check) { 2046 __ compareU64_and_branch(dst, (intptr_t)0, Assembler::bcondZero, *stub->entry()); 2047 } 2048 2049 // Check if negative. 
2050 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 2051 __ compare32_and_branch(src_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry()); 2052 } 2053 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 2054 __ compare32_and_branch(dst_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry()); 2055 } 2056 2057 // If the compiler was not able to prove that exact type of the source or the destination 2058 // of the arraycopy is an array type, check at runtime if the source or the destination is 2059 // an instance type. 2060 if (flags & LIR_OpArrayCopy::type_check) { 2061 assert(Klass::_lh_neutral_value == 0, "or replace z_lt instructions"); 2062 2063 if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2064 __ load_klass(tmp, dst); 2065 __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2066 __ branch_optimized(Assembler::bcondNotLow, *stub->entry()); 2067 } 2068 2069 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2070 __ load_klass(tmp, src); 2071 __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2072 __ branch_optimized(Assembler::bcondNotLow, *stub->entry()); 2073 } 2074 } 2075 2076 if (flags & LIR_OpArrayCopy::src_range_check) { 2077 __ z_la(tmp, Address(src_pos, length)); 2078 __ z_cl(tmp, src_length_addr); 2079 __ branch_optimized(Assembler::bcondHigh, *stub->entry()); 2080 } 2081 if (flags & LIR_OpArrayCopy::dst_range_check) { 2082 __ z_la(tmp, Address(dst_pos, length)); 2083 __ z_cl(tmp, dst_length_addr); 2084 __ branch_optimized(Assembler::bcondHigh, *stub->entry()); 2085 } 2086 2087 if (flags & LIR_OpArrayCopy::length_positive_check) { 2088 __ z_ltr(length, length); 2089 __ branch_optimized(Assembler::bcondNegative, *stub->entry()); 2090 } 2091 2092 // Stubs require 64 bit values. 2093 __ z_lgfr(src_pos, src_pos); // int -> long 2094 __ z_lgfr(dst_pos, dst_pos); // int -> long 2095 __ z_lgfr(length, length); // int -> long 2096 2097 if (flags & LIR_OpArrayCopy::type_check) { 2098 // We don't know the array types are compatible. 2099 if (basic_type != T_OBJECT) { 2100 // Simple test for basic type arrays. 2101 if (UseCompressedClassPointers) { 2102 __ z_l(tmp, src_klass_addr); 2103 __ z_c(tmp, dst_klass_addr); 2104 } else { 2105 __ z_lg(tmp, src_klass_addr); 2106 __ z_cg(tmp, dst_klass_addr); 2107 } 2108 __ branch_optimized(Assembler::bcondNotEqual, *stub->entry()); 2109 } else { 2110 // For object arrays, if src is a sub class of dst then we can 2111 // safely do the copy. 2112 NearLabel cont, slow; 2113 Register src_klass = Z_R1_scratch; 2114 Register dst_klass = Z_R10; 2115 2116 __ load_klass(src_klass, src); 2117 __ load_klass(dst_klass, dst); 2118 2119 __ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, nullptr); 2120 2121 store_parameter(src_klass, 0); // sub 2122 store_parameter(dst_klass, 1); // super 2123 emit_call_c(Runtime1::entry_for (C1StubId::slow_subtype_check_id)); 2124 CHECK_BAILOUT2(cont, slow); 2125 // Sets condition code 0 for match (2 otherwise). 2126 __ branch_optimized(Assembler::bcondEqual, cont); 2127 2128 __ bind(slow); 2129 2130 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 2131 if (copyfunc_addr != nullptr) { // use stub if available 2132 // Src is not a sub class of dst so we have to do a 2133 // per-element check. 2134 2135 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2136 if ((flags & mask) != mask) { 2137 // Check that at least both of them object arrays. 
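        // That is: one of src/dst is statically known to be an object array (see the
        // assert below); verify at runtime that the other one is an object array as
        // well by comparing its klass' layout helper with the objArray layout helper.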
2138 assert(flags & mask, "one of the two should be known to be an object array"); 2139 2140 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2141 __ load_klass(tmp, src); 2142 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2143 __ load_klass(tmp, dst); 2144 } 2145 Address klass_lh_addr(tmp, Klass::layout_helper_offset()); 2146 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2147 __ load_const_optimized(Z_R1_scratch, objArray_lh); 2148 __ z_c(Z_R1_scratch, klass_lh_addr); 2149 __ branch_optimized(Assembler::bcondNotEqual, *stub->entry()); 2150 } 2151 2152 // Save outgoing arguments in callee saved registers (C convention) in case 2153 // a call to System.arraycopy is needed. 2154 Register callee_saved_src = Z_R10; 2155 Register callee_saved_src_pos = Z_R11; 2156 Register callee_saved_dst = Z_R12; 2157 Register callee_saved_dst_pos = Z_R13; 2158 Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved. 2159 2160 __ lgr_if_needed(callee_saved_src, src); 2161 __ lgr_if_needed(callee_saved_src_pos, src_pos); 2162 __ lgr_if_needed(callee_saved_dst, dst); 2163 __ lgr_if_needed(callee_saved_dst_pos, dst_pos); 2164 __ lgr_if_needed(callee_saved_length, length); 2165 2166 __ z_llgfr(length, length); // Higher 32bits must be null. 2167 2168 __ z_sllg(Z_ARG1, src_pos, shift_amount); // index -> byte offset 2169 __ z_sllg(Z_ARG2, dst_pos, shift_amount); // index -> byte offset 2170 2171 __ z_la(Z_ARG1, Address(src, Z_ARG1, arrayOopDesc::base_offset_in_bytes(basic_type))); 2172 assert_different_registers(Z_ARG1, dst, dst_pos, length); 2173 __ z_la(Z_ARG2, Address(dst, Z_ARG2, arrayOopDesc::base_offset_in_bytes(basic_type))); 2174 assert_different_registers(Z_ARG2, dst, length); 2175 2176 __ z_lgr(Z_ARG3, length); 2177 assert_different_registers(Z_ARG3, dst); 2178 2179 __ load_klass(Z_ARG5, dst); 2180 __ z_lg(Z_ARG5, Address(Z_ARG5, ObjArrayKlass::element_klass_offset())); 2181 __ z_lg(Z_ARG4, Address(Z_ARG5, Klass::super_check_offset_offset())); 2182 emit_call_c(copyfunc_addr); 2183 CHECK_BAILOUT2(cont, slow); 2184 2185 #ifndef PRODUCT 2186 if (PrintC1Statistics) { 2187 NearLabel failed; 2188 __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondNotEqual, failed); 2189 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_cnt); 2190 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2191 __ bind(failed); 2192 } 2193 #endif 2194 2195 __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation()); 2196 2197 #ifndef PRODUCT 2198 if (PrintC1Statistics) { 2199 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_attempt_cnt); 2200 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2201 } 2202 #endif 2203 2204 __ z_lgr(tmp, Z_RET); 2205 __ z_xilf(tmp, -1); 2206 2207 // Restore previously spilled arguments 2208 __ lgr_if_needed(src, callee_saved_src); 2209 __ lgr_if_needed(src_pos, callee_saved_src_pos); 2210 __ lgr_if_needed(dst, callee_saved_dst); 2211 __ lgr_if_needed(dst_pos, callee_saved_dst_pos); 2212 __ lgr_if_needed(length, callee_saved_length); 2213 2214 __ z_sr(length, tmp); 2215 __ z_ar(src_pos, tmp); 2216 __ z_ar(dst_pos, tmp); 2217 } 2218 2219 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 2220 2221 __ bind(cont); 2222 } 2223 } 2224 2225 #ifdef ASSERT 2226 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 2227 // Sanity check the known type with the incoming class. 
For the 2228 // primitive case the types must match exactly with src.klass and 2229 // dst.klass each exactly matching the default type. For the 2230 // object array case, if no type check is needed then either the 2231 // dst type is exactly the expected type and the src type is a 2232 // subtype which we can't check or src is the same array as dst 2233 // but not necessarily exactly of type default_type. 2234 NearLabel known_ok, halt; 2235 metadata2reg(default_type->constant_encoding(), tmp); 2236 if (UseCompressedClassPointers) { 2237 __ encode_klass_not_null(tmp); 2238 } 2239 2240 if (basic_type != T_OBJECT) { 2241 if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); } 2242 else { __ z_cg(tmp, dst_klass_addr); } 2243 __ branch_optimized(Assembler::bcondNotEqual, halt); 2244 if (UseCompressedClassPointers) { __ z_c (tmp, src_klass_addr); } 2245 else { __ z_cg(tmp, src_klass_addr); } 2246 __ branch_optimized(Assembler::bcondEqual, known_ok); 2247 } else { 2248 if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); } 2249 else { __ z_cg(tmp, dst_klass_addr); } 2250 __ branch_optimized(Assembler::bcondEqual, known_ok); 2251 __ compareU64_and_branch(src, dst, Assembler::bcondEqual, known_ok); 2252 } 2253 __ bind(halt); 2254 __ stop("incorrect type information in arraycopy"); 2255 __ bind(known_ok); 2256 } 2257 #endif 2258 2259 #ifndef PRODUCT 2260 if (PrintC1Statistics) { 2261 __ load_const_optimized(Z_R1_scratch, Runtime1::arraycopy_count_address(basic_type)); 2262 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2263 } 2264 #endif 2265 2266 __ z_sllg(tmp, src_pos, shift_amount); // index -> byte offset 2267 __ z_sllg(Z_R1_scratch, dst_pos, shift_amount); // index -> byte offset 2268 2269 assert_different_registers(Z_ARG1, dst, dst_pos, length); 2270 __ z_la(Z_ARG1, Address(src, tmp, arrayOopDesc::base_offset_in_bytes(basic_type))); 2271 assert_different_registers(Z_ARG2, length); 2272 __ z_la(Z_ARG2, Address(dst, Z_R1_scratch, arrayOopDesc::base_offset_in_bytes(basic_type))); 2273 __ lgr_if_needed(Z_ARG3, length); 2274 2275 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2276 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2277 const char *name; 2278 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2279 __ call_VM_leaf(entry); 2280 2281 if (stub != nullptr) { 2282 __ bind(*stub->continuation()); 2283 } 2284 } 2285 2286 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2287 if (dest->is_single_cpu()) { 2288 if (left->type() == T_OBJECT) { 2289 switch (code) { 2290 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2291 case lir_shr: __ z_srag (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2292 case lir_ushr: __ z_srlg (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2293 default: ShouldNotReachHere(); 2294 } 2295 } else { 2296 assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts"); 2297 Register masked_count = Z_R1_scratch; 2298 __ z_lr(masked_count, count->as_register()); 2299 __ z_nill(masked_count, 31); 2300 switch (code) { 2301 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), 0, masked_count); break; 2302 case lir_shr: __ z_sra (dest->as_register(), 0, masked_count); break; 2303 case lir_ushr: __ z_srl (dest->as_register(), 0, masked_count); break; 2304 default: 
ShouldNotReachHere(); 2305 } 2306 } 2307 } else { 2308 switch (code) { 2309 case lir_shl: __ z_sllg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2310 case lir_shr: __ z_srag (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2311 case lir_ushr: __ z_srlg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2312 default: ShouldNotReachHere(); 2313 } 2314 } 2315 } 2316 2317 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2318 if (left->type() == T_OBJECT) { 2319 count = count & 63; // Shouldn't shift by more than sizeof(intptr_t). 2320 Register l = left->as_register(); 2321 Register d = dest->as_register_lo(); 2322 switch (code) { 2323 case lir_shl: __ z_sllg (d, l, count); break; 2324 case lir_shr: __ z_srag (d, l, count); break; 2325 case lir_ushr: __ z_srlg (d, l, count); break; 2326 default: ShouldNotReachHere(); 2327 } 2328 return; 2329 } 2330 if (dest->is_single_cpu()) { 2331 assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts"); 2332 count = count & 0x1F; // Java spec 2333 switch (code) { 2334 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), count); break; 2335 case lir_shr: __ z_sra (dest->as_register(), count); break; 2336 case lir_ushr: __ z_srl (dest->as_register(), count); break; 2337 default: ShouldNotReachHere(); 2338 } 2339 } else if (dest->is_double_cpu()) { 2340 count = count & 63; // Java spec 2341 Register l = left->as_pointer_register(); 2342 Register d = dest->as_pointer_register(); 2343 switch (code) { 2344 case lir_shl: __ z_sllg (d, l, count); break; 2345 case lir_shr: __ z_srag (d, l, count); break; 2346 case lir_ushr: __ z_srlg (d, l, count); break; 2347 default: ShouldNotReachHere(); 2348 } 2349 } else { 2350 ShouldNotReachHere(); 2351 } 2352 } 2353 2354 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 2355 if (op->init_check()) { 2356 // Make sure klass is initialized & doesn't have finalizer. 2357 // init_state needs acquire, but S390 is TSO, and so we are already good. 2358 const int state_offset = in_bytes(InstanceKlass::init_state_offset()); 2359 Register iklass = op->klass()->as_register(); 2360 add_debug_info_for_null_check_here(op->stub()->info()); 2361 if (Immediate::is_uimm12(state_offset)) { 2362 __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized); 2363 } else { 2364 __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized); 2365 } 2366 __ branch_optimized(Assembler::bcondNotEqual, *op->stub()->entry()); // Use long branch, because slow_case might be far. 
2367 } 2368 __ allocate_object(op->obj()->as_register(), 2369 op->tmp1()->as_register(), 2370 op->tmp2()->as_register(), 2371 op->header_size(), 2372 op->object_size(), 2373 op->klass()->as_register(), 2374 *op->stub()->entry()); 2375 __ bind(*op->stub()->continuation()); 2376 __ verify_oop(op->obj()->as_register(), FILE_AND_LINE); 2377 } 2378 2379 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 2380 Register len = op->len()->as_register(); 2381 __ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend 2382 2383 if (UseSlowPath || 2384 (!UseFastNewObjectArray && (is_reference_type(op->type()))) || 2385 (!UseFastNewTypeArray && (!is_reference_type(op->type())))) { 2386 __ z_brul(*op->stub()->entry()); 2387 } else { 2388 __ allocate_array(op->obj()->as_register(), 2389 op->len()->as_register(), 2390 op->tmp1()->as_register(), 2391 op->tmp2()->as_register(), 2392 arrayOopDesc::base_offset_in_bytes(op->type()), 2393 type2aelembytes(op->type()), 2394 op->klass()->as_register(), 2395 *op->stub()->entry(), 2396 op->zero_array()); 2397 } 2398 __ bind(*op->stub()->continuation()); 2399 } 2400 2401 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data, 2402 Register recv, Register tmp1, Label* update_done) { 2403 uint i; 2404 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2405 Label next_test; 2406 // See if the receiver is receiver[n]. 2407 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 2408 __ z_cg(recv, receiver_addr); 2409 __ z_brne(next_test); 2410 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); 2411 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2412 __ branch_optimized(Assembler::bcondAlways, *update_done); 2413 __ bind(next_test); 2414 } 2415 2416 // Didn't find receiver; find next empty slot and fill it in. 2417 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2418 Label next_test; 2419 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 2420 __ z_ltg(Z_R0_scratch, recv_addr); 2421 __ z_brne(next_test); 2422 __ z_stg(recv, recv_addr); 2423 __ load_const_optimized(tmp1, DataLayout::counter_increment); 2424 __ z_stg(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)), mdo); 2425 __ branch_optimized(Assembler::bcondAlways, *update_done); 2426 __ bind(next_test); 2427 } 2428 } 2429 2430 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 2431 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { 2432 Unimplemented(); 2433 } 2434 2435 void LIR_Assembler::store_parameter(Register r, int param_num) { 2436 assert(param_num >= 0, "invalid num"); 2437 int offset_in_bytes = param_num * BytesPerWord; 2438 check_reserved_argument_area(offset_in_bytes); 2439 offset_in_bytes += FrameMap::first_available_sp_in_frame; 2440 __ z_stg(r, offset_in_bytes, Z_SP); 2441 } 2442 2443 void LIR_Assembler::store_parameter(jint c, int param_num) { 2444 assert(param_num >= 0, "invalid num"); 2445 int offset_in_bytes = param_num * BytesPerWord; 2446 check_reserved_argument_area(offset_in_bytes); 2447 offset_in_bytes += FrameMap::first_available_sp_in_frame; 2448 __ store_const(Address(Z_SP, offset_in_bytes), c, Z_R1_scratch, true); 2449 } 2450 2451 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 2452 // We always need a stub for the failure case. 
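  // Rough flow of the code below: a null object either updates the MDO's null_seen flag
  // (when profiling) or branches straight to obj_is_null; a non-null object is checked
  // with check_klass_subtype_fast_path, falling back to the out-of-line
  // C1StubId::slow_subtype_check_id stub when the fast path cannot decide.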
2453 CodeStub* stub = op->stub(); 2454 Register obj = op->object()->as_register(); 2455 Register k_RInfo = op->tmp1()->as_register(); 2456 Register klass_RInfo = op->tmp2()->as_register(); 2457 Register dst = op->result_opr()->as_register(); 2458 Register Rtmp1 = Z_R1_scratch; 2459 ciKlass* k = op->klass(); 2460 2461 assert(!op->tmp3()->is_valid(), "tmp3's not needed"); 2462 2463 // Check if it needs to be profiled. 2464 ciMethodData* md = nullptr; 2465 ciProfileData* data = nullptr; 2466 2467 if (op->should_profile()) { 2468 ciMethod* method = op->profiled_method(); 2469 assert(method != nullptr, "Should have method"); 2470 int bci = op->profiled_bci(); 2471 md = method->method_data_or_null(); 2472 assert(md != nullptr, "Sanity"); 2473 data = md->bci_to_data(bci); 2474 assert(data != nullptr, "need data for type check"); 2475 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2476 } 2477 2478 // Temp operands do not overlap with inputs, if this is their last 2479 // use (end of range is exclusive), so a register conflict is possible. 2480 if (obj == k_RInfo) { 2481 k_RInfo = dst; 2482 } else if (obj == klass_RInfo) { 2483 klass_RInfo = dst; 2484 } 2485 assert_different_registers(obj, k_RInfo, klass_RInfo); 2486 2487 if (op->should_profile()) { 2488 Register mdo = klass_RInfo; 2489 metadata2reg(md->constant_encoding(), mdo); 2490 NearLabel not_null; 2491 __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondNotEqual, not_null); 2492 // Object is null; update MDO and exit. 2493 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 2494 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 2495 __ or2mem_8(data_addr, header_bits); 2496 __ branch_optimized(Assembler::bcondAlways, *obj_is_null); 2497 __ bind(not_null); 2498 2499 NearLabel update_done; 2500 Register recv = k_RInfo; 2501 __ load_klass(recv, obj); 2502 type_profile_helper(mdo, md, data, recv, Rtmp1, &update_done); 2503 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2504 __ add2mem_64(counter_addr, DataLayout::counter_increment, Rtmp1); 2505 __ bind(update_done); 2506 } else { 2507 __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondEqual, *obj_is_null); 2508 } 2509 2510 Label *failure_target = failure; 2511 Label *success_target = success; 2512 2513 // Patching may screw with our temporaries, 2514 // so let's do it before loading the class. 2515 if (k->is_loaded()) { 2516 metadata2reg(k->constant_encoding(), k_RInfo); 2517 } else { 2518 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 2519 } 2520 assert(obj != k_RInfo, "must be different"); 2521 2522 __ verify_oop(obj, FILE_AND_LINE); 2523 2524 // Get object class. 2525 // Not a safepoint as obj null check happens earlier. 2526 if (op->fast_check()) { 2527 if (UseCompressedClassPointers) { 2528 __ load_klass(klass_RInfo, obj); 2529 __ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target); 2530 } else { 2531 __ z_cg(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 2532 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2533 } 2534 // Successful cast, fall through to profile or jump. 2535 } else { 2536 bool need_slow_path = !k->is_loaded() || 2537 ((int) k->super_check_offset() == in_bytes(Klass::secondary_super_cache_offset())); 2538 intptr_t super_check_offset = k->is_loaded() ? 
k->super_check_offset() : -1L; 2539 __ load_klass(klass_RInfo, obj); 2540 // Perform the fast part of the checking logic. 2541 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, 2542 (need_slow_path ? success_target : nullptr), 2543 failure_target, nullptr, 2544 RegisterOrConstant(super_check_offset)); 2545 if (need_slow_path) { 2546 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2547 address a = Runtime1::entry_for (C1StubId::slow_subtype_check_id); 2548 store_parameter(klass_RInfo, 0); // sub 2549 store_parameter(k_RInfo, 1); // super 2550 emit_call_c(a); // Sets condition code 0 for match (2 otherwise). 2551 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2552 // Fall through to success case. 2553 } 2554 } 2555 2556 __ branch_optimized(Assembler::bcondAlways, *success); 2557 } 2558 2559 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 2560 LIR_Code code = op->code(); 2561 if (code == lir_store_check) { 2562 Register value = op->object()->as_register(); 2563 Register array = op->array()->as_register(); 2564 Register k_RInfo = op->tmp1()->as_register(); 2565 Register klass_RInfo = op->tmp2()->as_register(); 2566 Register Rtmp1 = Z_R1_scratch; 2567 2568 CodeStub* stub = op->stub(); 2569 2570 // Check if it needs to be profiled. 2571 ciMethodData* md = nullptr; 2572 ciProfileData* data = nullptr; 2573 2574 assert_different_registers(value, k_RInfo, klass_RInfo); 2575 2576 if (op->should_profile()) { 2577 ciMethod* method = op->profiled_method(); 2578 assert(method != nullptr, "Should have method"); 2579 int bci = op->profiled_bci(); 2580 md = method->method_data_or_null(); 2581 assert(md != nullptr, "Sanity"); 2582 data = md->bci_to_data(bci); 2583 assert(data != nullptr, "need data for type check"); 2584 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2585 } 2586 NearLabel done; 2587 Label *success_target = &done; 2588 Label *failure_target = stub->entry(); 2589 2590 if (op->should_profile()) { 2591 Register mdo = klass_RInfo; 2592 metadata2reg(md->constant_encoding(), mdo); 2593 NearLabel not_null; 2594 __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondNotEqual, not_null); 2595 // Object is null; update MDO and exit. 2596 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 2597 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 2598 __ or2mem_8(data_addr, header_bits); 2599 __ branch_optimized(Assembler::bcondAlways, done); 2600 __ bind(not_null); 2601 2602 NearLabel update_done; 2603 Register recv = k_RInfo; 2604 __ load_klass(recv, value); 2605 type_profile_helper(mdo, md, data, recv, Rtmp1, &update_done); 2606 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2607 __ add2mem_64(counter_addr, DataLayout::counter_increment, Rtmp1); 2608 __ bind(update_done); 2609 } else { 2610 __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondEqual, done); 2611 } 2612 2613 add_debug_info_for_null_check_here(op->info_for_exception()); 2614 __ load_klass(k_RInfo, array); 2615 __ load_klass(klass_RInfo, value); 2616 2617 // Get instance klass (it's already uncompressed). 2618 __ z_lg(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 2619 // Perform the fast part of the checking logic. 
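    // On a fast-path miss, control falls through to the out-of-line slow subtype check
    // below; the stub sets condition code 0 on a match, so bcondNotEqual routes
    // mismatches to the failure stub.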
2620 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 2621 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2622 address a = Runtime1::entry_for (C1StubId::slow_subtype_check_id); 2623 store_parameter(klass_RInfo, 0); // sub 2624 store_parameter(k_RInfo, 1); // super 2625 emit_call_c(a); // Sets condition code 0 for match (2 otherwise). 2626 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2627 // Fall through to success case. 2628 2629 __ bind(done); 2630 } else { 2631 if (code == lir_checkcast) { 2632 Register obj = op->object()->as_register(); 2633 Register dst = op->result_opr()->as_register(); 2634 NearLabel success; 2635 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 2636 __ bind(success); 2637 __ lgr_if_needed(dst, obj); 2638 } else { 2639 if (code == lir_instanceof) { 2640 Register obj = op->object()->as_register(); 2641 Register dst = op->result_opr()->as_register(); 2642 NearLabel success, failure, done; 2643 emit_typecheck_helper(op, &success, &failure, &failure); 2644 __ bind(failure); 2645 __ clear_reg(dst); 2646 __ branch_optimized(Assembler::bcondAlways, done); 2647 __ bind(success); 2648 __ load_const_optimized(dst, 1); 2649 __ bind(done); 2650 } else { 2651 ShouldNotReachHere(); 2652 } 2653 } 2654 } 2655 } 2656 2657 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2658 Register addr = op->addr()->as_pointer_register(); 2659 Register t1_cmp = Z_R1_scratch; 2660 if (op->code() == lir_cas_long) { 2661 Register cmp_value_lo = op->cmp_value()->as_register_lo(); 2662 Register new_value_lo = op->new_value()->as_register_lo(); 2663 __ z_lgr(t1_cmp, cmp_value_lo); 2664 // Perform the compare and swap operation. 2665 __ z_csg(t1_cmp, new_value_lo, 0, addr); 2666 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { 2667 Register cmp_value = op->cmp_value()->as_register(); 2668 Register new_value = op->new_value()->as_register(); 2669 if (op->code() == lir_cas_obj) { 2670 if (UseCompressedOops) { 2671 t1_cmp = op->tmp1()->as_register(); 2672 Register t2_new = op->tmp2()->as_register(); 2673 assert_different_registers(cmp_value, new_value, addr, t1_cmp, t2_new); 2674 __ oop_encoder(t1_cmp, cmp_value, true /*maybe null*/); 2675 __ oop_encoder(t2_new, new_value, true /*maybe null*/); 2676 __ z_cs(t1_cmp, t2_new, 0, addr); 2677 } else { 2678 __ z_lgr(t1_cmp, cmp_value); 2679 __ z_csg(t1_cmp, new_value, 0, addr); 2680 } 2681 } else { 2682 __ z_lr(t1_cmp, cmp_value); 2683 __ z_cs(t1_cmp, new_value, 0, addr); 2684 } 2685 } else { 2686 ShouldNotReachHere(); // new lir_cas_?? 2687 } 2688 } 2689 2690 void LIR_Assembler::breakpoint() { 2691 Unimplemented(); 2692 // __ breakpoint_trap(); 2693 } 2694 2695 void LIR_Assembler::push(LIR_Opr opr) { 2696 ShouldNotCallThis(); // unused 2697 } 2698 2699 void LIR_Assembler::pop(LIR_Opr opr) { 2700 ShouldNotCallThis(); // unused 2701 } 2702 2703 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2704 Address addr = frame_map()->address_for_monitor_lock(monitor_no); 2705 __ add2reg(dst_opr->as_register(), addr.disp(), addr.base()); 2706 } 2707 2708 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2709 Register obj = op->obj_opr()->as_register(); // May not be an oop. 
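  // Register roles below: obj is the object being (un)locked, hdr is a temporary used by
  // lock_object()/unlock_object(), and lock holds the address of the BasicLock slot in
  // the frame (see monitor_address() above). With LockingMode == LM_MONITOR, locking
  // always goes through the runtime stub.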
2710 Register hdr = op->hdr_opr()->as_register(); 2711 Register lock = op->lock_opr()->as_register(); 2712 if (LockingMode == LM_MONITOR) { 2713 if (op->info() != nullptr) { 2714 add_debug_info_for_null_check_here(op->info()); 2715 __ null_check(obj); 2716 } 2717 __ branch_optimized(Assembler::bcondAlways, *op->stub()->entry()); 2718 } else if (op->code() == lir_lock) { 2719 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2720 // Add debug info for NullPointerException only if one is possible. 2721 if (op->info() != nullptr) { 2722 add_debug_info_for_null_check_here(op->info()); 2723 } 2724 __ lock_object(hdr, obj, lock, *op->stub()->entry()); 2725 // done 2726 } else if (op->code() == lir_unlock) { 2727 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2728 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2729 } else { 2730 ShouldNotReachHere(); 2731 } 2732 __ bind(*op->stub()->continuation()); 2733 } 2734 2735 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 2736 Register obj = op->obj()->as_pointer_register(); 2737 Register result = op->result_opr()->as_pointer_register(); 2738 2739 CodeEmitInfo* info = op->info(); 2740 if (info != nullptr) { 2741 add_debug_info_for_null_check_here(info); 2742 } 2743 2744 if (UseCompressedClassPointers) { 2745 __ z_llgf(result, Address(obj, oopDesc::klass_offset_in_bytes())); 2746 __ decode_klass_not_null(result); 2747 } else { 2748 __ z_lg(result, Address(obj, oopDesc::klass_offset_in_bytes())); 2749 } 2750 } 2751 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2752 ciMethod* method = op->profiled_method(); 2753 int bci = op->profiled_bci(); 2754 ciMethod* callee = op->profiled_callee(); 2755 2756 // Update counter for all call types. 2757 ciMethodData* md = method->method_data_or_null(); 2758 assert(md != nullptr, "Sanity"); 2759 ciProfileData* data = md->bci_to_data(bci); 2760 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); 2761 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2762 Register mdo = op->mdo()->as_register(); 2763 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 2764 Register tmp1 = op->tmp1()->as_register_lo(); 2765 metadata2reg(md->constant_encoding(), mdo); 2766 2767 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2768 // Perform additional virtual call profiling for invokevirtual and 2769 // invokeinterface bytecodes 2770 if (op->should_profile_receiver_type()) { 2771 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2772 Register recv = op->recv()->as_register(); 2773 assert_different_registers(mdo, tmp1, recv); 2774 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2775 ciKlass* known_klass = op->known_holder(); 2776 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { 2777 // We know the type that will be seen at this call site; we can 2778 // statically update the MethodData* rather than needing to do 2779 // dynamic tests on the receiver type. 2780 2781 // NOTE: we should probably put a lock around this search to 2782 // avoid collisions by concurrent compilations. 
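      // The first loop below bumps the counter of the row that already records
      // known_klass; if no row matches, the second loop claims the first empty row,
      // stores known_klass there and bumps its counter.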
2783 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2784 uint i; 2785 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2786 ciKlass* receiver = vc_data->receiver(i); 2787 if (known_klass->equals(receiver)) { 2788 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2789 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2790 return; 2791 } 2792 } 2793 2794 // Receiver type not found in profile data. Select an empty slot. 2795 2796 // Note that this is less efficient than it should be because it 2797 // always does a write to the receiver part of the 2798 // VirtualCallData rather than just the first time. 2799 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2800 ciKlass* receiver = vc_data->receiver(i); 2801 if (receiver == nullptr) { 2802 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 2803 metadata2reg(known_klass->constant_encoding(), tmp1); 2804 __ z_stg(tmp1, recv_addr); 2805 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2806 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2807 return; 2808 } 2809 } 2810 } else { 2811 __ load_klass(recv, recv); 2812 NearLabel update_done; 2813 type_profile_helper(mdo, md, data, recv, tmp1, &update_done); 2814 // Receiver did not match any saved receiver and there is no empty row for it. 2815 // Increment total counter to indicate polymorphic case. 2816 __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1); 2817 __ bind(update_done); 2818 } 2819 } else { 2820 // static call 2821 __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1); 2822 } 2823 } 2824 2825 void LIR_Assembler::align_backward_branch_target() { 2826 __ align(OptoLoopAlignment); 2827 } 2828 2829 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { 2830 ShouldNotCallThis(); // There are no delay slots on ZARCH_64. 
2831 } 2832 2833 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 2834 // tmp must be unused 2835 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2836 assert(left->is_register(), "can only handle registers"); 2837 2838 if (left->is_single_cpu()) { 2839 __ z_lcr(dest->as_register(), left->as_register()); 2840 } else if (left->is_single_fpu()) { 2841 __ z_lcebr(dest->as_float_reg(), left->as_float_reg()); 2842 } else if (left->is_double_fpu()) { 2843 __ z_lcdbr(dest->as_double_reg(), left->as_double_reg()); 2844 } else { 2845 assert(left->is_double_cpu(), "Must be a long"); 2846 __ z_lcgr(dest->as_register_lo(), left->as_register_lo()); 2847 } 2848 } 2849 2850 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 2851 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2852 assert(!tmp->is_valid(), "don't need temporary"); 2853 emit_call_c(dest); 2854 CHECK_BAILOUT(); 2855 if (info != nullptr) { 2856 add_call_info_here(info); 2857 } 2858 } 2859 2860 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2861 ShouldNotCallThis(); // not needed on ZARCH_64 2862 } 2863 2864 void LIR_Assembler::membar() { 2865 __ z_fence(); 2866 } 2867 2868 void LIR_Assembler::membar_acquire() { 2869 __ z_acquire(); 2870 } 2871 2872 void LIR_Assembler::membar_release() { 2873 __ z_release(); 2874 } 2875 2876 void LIR_Assembler::membar_loadload() { 2877 __ z_acquire(); 2878 } 2879 2880 void LIR_Assembler::membar_storestore() { 2881 __ z_release(); 2882 } 2883 2884 void LIR_Assembler::membar_loadstore() { 2885 __ z_acquire(); 2886 } 2887 2888 void LIR_Assembler::membar_storeload() { 2889 __ z_fence(); 2890 } 2891 2892 void LIR_Assembler::on_spin_wait() { 2893 Unimplemented(); 2894 } 2895 2896 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2897 assert(patch_code == lir_patch_none, "Patch code not supported"); 2898 LIR_Address* addr = addr_opr->as_address_ptr(); 2899 assert(addr->scale() == LIR_Address::times_1, "scaling unsupported"); 2900 __ load_address(dest->as_pointer_register(), as_Address(addr)); 2901 } 2902 2903 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2904 ShouldNotCallThis(); // unused 2905 } 2906 2907 #ifdef ASSERT 2908 // Emit run-time assertion. 2909 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2910 Unimplemented(); 2911 } 2912 #endif 2913 2914 void LIR_Assembler::peephole(LIR_List*) { 2915 // Do nothing for now. 2916 } 2917 2918 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 2919 assert(code == lir_xadd, "lir_xchg not supported"); 2920 Address src_addr = as_Address(src->as_address_ptr()); 2921 Register base = src_addr.base(); 2922 intptr_t disp = src_addr.disp(); 2923 if (src_addr.index()->is_valid()) { 2924 // LAA and LAAG do not support index register. 
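    // So compute the full effective address (base + index + displacement) into
    // Z_R1_scratch first and use it as a plain base with displacement 0. LAA/LAAG
    // atomically add the operand to memory and return the previous value, which is
    // why only lir_xadd is handled here.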
2925 __ load_address(Z_R1_scratch, src_addr); 2926 base = Z_R1_scratch; 2927 disp = 0; 2928 } 2929 if (data->type() == T_INT) { 2930 __ z_laa(dest->as_register(), data->as_register(), disp, base); 2931 } else if (data->type() == T_LONG) { 2932 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register"); 2933 __ z_laag(dest->as_register_lo(), data->as_register_lo(), disp, base); 2934 } else { 2935 ShouldNotReachHere(); 2936 } 2937 } 2938 2939 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2940 Register obj = op->obj()->as_register(); 2941 Register tmp1 = op->tmp()->as_pointer_register(); 2942 Register tmp2 = Z_R1_scratch; 2943 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 2944 ciKlass* exact_klass = op->exact_klass(); 2945 intptr_t current_klass = op->current_klass(); 2946 bool not_null = op->not_null(); 2947 bool no_conflict = op->no_conflict(); 2948 2949 Label update, next, none, null_seen, init_klass; 2950 2951 bool do_null = !not_null; 2952 bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 2953 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 2954 2955 assert(do_null || do_update, "why are we here?"); 2956 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 2957 2958 __ verify_oop(obj, FILE_AND_LINE); 2959 2960 if (do_null || tmp1 != obj DEBUG_ONLY(|| true)) { 2961 __ z_ltgr(tmp1, obj); 2962 } 2963 if (do_null) { 2964 __ z_brnz(update); 2965 if (!TypeEntries::was_null_seen(current_klass)) { 2966 __ z_lg(tmp1, mdo_addr); 2967 __ z_oill(tmp1, TypeEntries::null_seen); 2968 __ z_stg(tmp1, mdo_addr); 2969 } 2970 if (do_update) { 2971 __ z_bru(next); 2972 } 2973 } else { 2974 __ asm_assert(Assembler::bcondNotZero, "unexpected null obj", __LINE__); 2975 } 2976 2977 __ bind(update); 2978 2979 if (do_update) { 2980 #ifdef ASSERT 2981 if (exact_klass != nullptr) { 2982 __ load_klass(tmp1, tmp1); 2983 metadata2reg(exact_klass->constant_encoding(), tmp2); 2984 __ z_cgr(tmp1, tmp2); 2985 __ asm_assert(Assembler::bcondEqual, "exact klass and actual klass differ", __LINE__); 2986 } 2987 #endif 2988 2989 Label do_update; 2990 __ z_lg(tmp2, mdo_addr); 2991 2992 if (!no_conflict) { 2993 if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) { 2994 if (exact_klass != nullptr) { 2995 metadata2reg(exact_klass->constant_encoding(), tmp1); 2996 } else { 2997 __ load_klass(tmp1, tmp1); 2998 } 2999 3000 // Klass seen before: nothing to do (regardless of unknown bit). 3001 __ z_lgr(Z_R0_scratch, tmp2); 3002 assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction"); 3003 __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF); 3004 __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next); 3005 3006 // Already unknown: Nothing to do anymore. 
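        // z_tmll tests the type_unknown bit in the low halfword of tmp2; bcondAllOne
        // branches when the bit is already set, so a profile already marked as unknown
        // is left untouched.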
3007 __ z_tmll(tmp2, TypeEntries::type_unknown); 3008 __ z_brc(Assembler::bcondAllOne, next); 3009 3010 if (TypeEntries::is_type_none(current_klass)) { 3011 __ z_lgr(Z_R0_scratch, tmp2); 3012 assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction"); 3013 __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF); 3014 __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, init_klass); 3015 } 3016 } else { 3017 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3018 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3019 3020 // Already unknown: Nothing to do anymore. 3021 __ z_tmll(tmp2, TypeEntries::type_unknown); 3022 __ z_brc(Assembler::bcondAllOne, next); 3023 } 3024 3025 // Different than before. Cannot keep accurate profile. 3026 __ z_oill(tmp2, TypeEntries::type_unknown); 3027 __ z_bru(do_update); 3028 } else { 3029 // There's a single possible klass at this profile point. 3030 assert(exact_klass != nullptr, "should be"); 3031 if (TypeEntries::is_type_none(current_klass)) { 3032 metadata2reg(exact_klass->constant_encoding(), tmp1); 3033 __ z_lgr(Z_R0_scratch, tmp2); 3034 assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction"); 3035 __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF); 3036 __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next); 3037 #ifdef ASSERT 3038 { 3039 Label ok; 3040 __ z_lgr(Z_R0_scratch, tmp2); 3041 assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction"); 3042 __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF); 3043 __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, ok); 3044 __ stop("unexpected profiling mismatch"); 3045 __ bind(ok); 3046 } 3047 #endif 3048 3049 } else { 3050 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3051 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3052 3053 // Already unknown: Nothing to do anymore. 3054 __ z_tmll(tmp2, TypeEntries::type_unknown); 3055 __ z_brc(Assembler::bcondAllOne, next); 3056 __ z_oill(tmp2, TypeEntries::type_unknown); 3057 __ z_bru(do_update); 3058 } 3059 } 3060 3061 __ bind(init_klass); 3062 // Combine klass and null_seen bit (only used if (tmp & type_mask)==0). 3063 __ z_ogr(tmp2, tmp1); 3064 3065 __ bind(do_update); 3066 __ z_stg(tmp2, mdo_addr); 3067 3068 __ bind(next); 3069 } 3070 } 3071 3072 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) { 3073 Unimplemented(); 3074 } 3075 3076 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3077 assert(op->crc()->is_single_cpu(), "crc must be register"); 3078 assert(op->val()->is_single_cpu(), "byte value must be register"); 3079 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3080 Register crc = op->crc()->as_register(); 3081 Register val = op->val()->as_register(); 3082 Register res = op->result_opr()->as_register(); 3083 3084 assert_different_registers(val, crc, res); 3085 3086 __ load_const_optimized(res, StubRoutines::crc_table_addr()); 3087 __ kernel_crc32_singleByteReg(crc, val, res, true); 3088 __ z_lgfr(res, crc); 3089 } 3090 3091 #undef __