/*
 * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_s390.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"
#include "registerSaver_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_s390.inline.hpp"

// Implementation of StubAssembler

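// call_RT performs a runtime (C++) call from stub code. It records the
// current PC in the frame anchor, passes Z_thread as the implicit first
// argument, performs the call, and dispatches any pending exception on
// return. The returned offset identifies the call site so the caller can
// register an oop map for it.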
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");

  // We cannot trust that code generated by the C++ compiler saves R14
  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
  // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame() save
  // it into the frame anchor.
  address pc = get_PC(Z_R1_scratch);
  int call_offset = (int)(pc - addr_at(0));
  set_last_Java_frame(Z_SP, Z_R1_scratch);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = nullptr;
  align_call_far_patchable(this->pc());
  return_pc = call_c_opt(entry_point);
  assert(return_pc != nullptr, "const section overflow");

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to jump to forward_exception conditionally. However, after
    // relocation the conditional branch might no longer reach its target,
    // so we branch around an unconditional jump, which always reaches.

    Label ok;
    z_bre(ok); // bcondEqual is the same as bcondZero.

    // Exception pending => forward to exception handler.

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_oop_offset()), sizeof(jlong));
    }
    if (metadata_result->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_metadata_offset()), sizeof(jlong));
    }
    if (frame_size() == no_frame_size) {
      // Pop the stub frame.
      pop_frame();
      restore_return_pc();
      load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
      z_br(Z_R1);
    } else if (_stub_id == (int)C1StubId::forward_exception_id) {
      should_not_reach_here();
    } else {
      load_const_optimized(Z_R1, Runtime1::entry_for(C1StubId::forward_exception_id));
      z_br(Z_R1);
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result_oop(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_metadata(metadata_result);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  assert(arg3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg3);
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

#ifndef PRODUCT
#undef  __
#define __ (Verbose ? (sasm->block_comment(FILE_AND_LINE),sasm):sasm)->
#endif // !PRODUCT

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

static OopMap* generate_oop_map(StubAssembler* sasm) {
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::generate_oop_map(sasm, reg_set);
}

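// The helpers below bracket runtime calls made from stub code: a stub saves
// the live registers (optionally excluding Z_R2 when that register carries
// the stub's result), performs the call via call_RT, registers an oop map
// for the call site, and restores the saved registers. A typical use, as in
// generate_exception_throw below:
//
//   OopMap* oop_map = save_live_registers(sasm);
//   int call_offset = __ call_RT(noreg, noreg, target);
//   oop_maps->add_gc_map(call_offset, oop_map);
//   restore_live_registers(sasm);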
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true, Register return_pc = Z_R14) {
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set =
    save_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
  if (!save_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("save_live_registers_except_r2");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  RegisterSaver::RegisterSet reg_set =
    restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (!restore_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("restore_live_registers_except_r2");
  RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}

void Runtime1::initialize_pd() {
  // Nothing to do.
}

uint Runtime1::runtime_blob_current_thread_offset(frame f) {
  Unimplemented();
  return 0;
}

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

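// Unwind an exception out of the current activation: no stub frame is built
// here. The caller's exception handler is looked up with a leaf call (so no
// oop map is needed), and control transfers to it directly with Z_EXC_OOP
// and Z_EXC_PC re-established.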
void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // Incoming parameters: Z_EXC_OOP and Z_EXC_PC.
  // Keep copies in callee-saved registers during runtime call.
  const Register exception_oop_callee_saved = Z_R11;
  const Register exception_pc_callee_saved  = Z_R12;
  // Other registers used in this stub.
  const Register handler_addr = Z_R4;

  if (AbortVMOnException) {
    save_live_registers(sasm);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), Z_EXC_OOP);
    restore_live_registers(sasm);
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC);

  // Check that fields in JavaThread for exception oop and issuing pc are set.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and pc in callee-saved registers to preserve them
  // during the runtime call.
  __ verify_not_null_oop(Z_EXC_OOP);
  __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP);
  __ lgr_if_needed(exception_pc_callee_saved, Z_EXC_PC);

  __ push_frame_abi160(0); // Runtime code needs the z_abi_160.

  // Search the exception handler address of the caller (using the return address).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Z_thread, Z_EXC_PC);
  // Z_RET (Z_R2): exception handler address of the caller.

  __ pop_frame();

  __ invalidate_registers(exception_oop_callee_saved, exception_pc_callee_saved, Z_RET);

  // Move result of call into correct register.
  __ lgr_if_needed(handler_addr, Z_RET);

  // Restore exception oop and pc to Z_EXC_OOP and Z_EXC_PC (required convention of exception handler).
  __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved);
  __ lgr_if_needed(Z_EXC_PC, exception_pc_callee_saved);

  // Verify that there is really a valid exception in Z_EXC_OOP.
  __ verify_not_null_oop(Z_EXC_OOP);

  __ z_br(handler_addr); // Jump to exception handler.
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; it returns non-zero if the nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // Re-execute the patched instruction or, if the nmethod was
  // deoptimized, return to the deoptimization handler entry that will
  // cause re-execution of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  __ z_ltr(Z_RET, Z_RET); // Set CC from the return value (0 means: not deopted).

  restore_live_registers(sasm);

  __ z_bcr(Assembler::bcondZero, Z_R14); // Return if the nmethod was not deoptimized.

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just returned.
  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ load_const_optimized(Z_R1_scratch, dest);
  __ z_br(Z_R1_scratch);

  return oop_maps;
}

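// generate_code_for emits the code for one C1 runtime stub, selected by id.
// Most cases follow the same skeleton that generate_exception_throw and
// generate_patching use: make a frame and save the live registers, call into
// the VM via call_RT, attach an oop map to the call site, restore, and
// return. Stubs that produce a value (the allocation stubs) return it in
// Z_R2 and therefore use the *_except_r2 save/restore flavors.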
OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {

  // For better readability.
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Default value; overwritten for some optimized stubs that are
  // called from methods that do not use the fpu.
  bool save_fpu_registers = true;

  // Stub code and info for the different stubs.
  OopMapSet* oop_maps = nullptr;
  switch (id) {
    case C1StubId::forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // will not return
      }
      break;

    case C1StubId::new_instance_id:
    case C1StubId::fast_new_instance_id:
    case C1StubId::fast_new_instance_init_check_id:
      {
        Register klass = Z_R11; // Incoming
        Register obj   = Z_R2;  // Result

        if (id == C1StubId::new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == C1StubId::fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;

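    // counter_overflow receives its two arguments on the stack rather than
    // in registers: they are fetched below at offsets relative to the frame
    // just pushed by save_live_registers (frame_size) plus
    // FrameMap::first_available_sp_in_frame.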
    case C1StubId::counter_overflow_id:
      {
        // Arguments:
        //   bci    : stack param 0
        //   method : stack param 1
        Register bci = Z_ARG2, method = Z_ARG3;
        OopMap* map = save_live_registers(sasm);
        // Frame size in bytes.
        const int frame_size = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
        __ z_lg(bci,    0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(method, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ z_br(Z_R14);
      }
      break;

    case C1StubId::new_type_array_id:
    case C1StubId::new_object_array_id:
      {
        Register length = Z_R13; // Incoming
        Register klass  = Z_R11; // Incoming
        Register obj    = Z_R2;  // Result

        if (id == C1StubId::new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          NearLabel ok;
          Register t0 = obj;
          __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
          __ z_sra(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == C1StubId::new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset;
        if (id == C1StubId::new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;

    case C1StubId::new_multi_array_id:
      { __ set_info("new_multi_array", dont_gc_arguments);
        // Z_R3: klass
        // Z_R4: rank
        // Z_R5: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(Z_R2, noreg, CAST_FROM_FN_PTR(address, new_multi_array), Z_R3, Z_R4, Z_R5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        // Z_R2: new multi array
        __ verify_oop(Z_R2, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;

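    // register_finalizer has a fast path: it tests the klass's has_finalizer
    // flag and returns immediately if the flag is clear, so the frame and the
    // runtime call are only set up for objects that actually need finalizer
    // registration.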
    case C1StubId::register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Load the klass and check the has_finalizer flag.
        Register klass = Z_ARG2;
        __ load_klass(klass, Z_ARG1);
        __ z_tm(Address(klass, Klass::misc_flags_offset()), KlassFlags::_misc_has_finalizer);
        __ z_bcr(Assembler::bcondAllZero, Z_R14); // Return if bit is not set.

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), Z_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers.
        restore_live_registers(sasm);

        __ z_br(Z_R14);
      }
      break;

    case C1StubId::throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case C1StubId::throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case C1StubId::throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case C1StubId::throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case C1StubId::handle_exception_nofpu_id:
    case C1StubId::handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case C1StubId::handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case C1StubId::unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // Note: no stub frame since we are about to leave the current
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case C1StubId::throw_array_store_exception_id:
      { __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case C1StubId::throw_class_cast_exception_id:
      { // Z_R1_scratch: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case C1StubId::throw_incompatible_class_change_error_id:
      { __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case C1StubId::slow_subtype_check_id:
      {
        // Arguments:
        //   sub  : stack param 0
        //   super: stack param 1
        //   raddr: Z_R14, blown by call
        //
        // Result: condition code 0 for match (bcondEqual will be true),
        //         condition code 2 for miss  (bcondNotEqual will be true)
        NearLabel miss;
        const Register Rsubklass   = Z_ARG2; // sub
        const Register Rsuperklass = Z_ARG3; // super

        // No args, but tmp registers that are killed.
        const Register Rlength    = Z_ARG4; // cache array length
        const Register Rarray_ptr = Z_ARG5; // Current value from cache array.

        if (UseCompressedOops) {
          assert(Universe::heap() != nullptr, "java heap must be initialized to generate partial_subtype_check stub");
        }

        const int frame_size = 4*BytesPerWord + frame::z_abi_160_size;
        // Save the return pc. This is not strictly necessary, but can be
        // helpful in the case of crashes.
        __ save_return_pc();
        __ push_frame(frame_size);
        // Save registers before changing them.
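        // The temps double as this stub's scratch registers, but the stub
        // must behave as if it preserves all registers: its only outputs are
        // the condition code and the clobbered return address (Z_R14). The
        // temps are therefore spilled into the fresh frame and reloaded on
        // both the match and the miss exit paths.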
        int i = 0;
        __ z_stg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");

        // Get sub and super from stack.
        __ z_lg(Rsubklass,   0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);

        __ check_klass_subtype_slow_path(Rsubklass,
                                         Rsuperklass,
                                         Rarray_ptr /* temp_reg */,
                                         Rlength /* temp2_reg */,
                                         nullptr /* L_success */,
                                         &miss /* L_failure */);

        // Match falls through here.
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ clear_reg(Z_R0_scratch); // Zero indicates a match. Sets CC 0 (bcondEqual will be true).
        __ z_br(Z_R14);

        __ BIND(miss);
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ load_const_optimized(Z_R0_scratch, 1); // One indicates a miss.
        __ z_ltgr(Z_R0_scratch, Z_R0_scratch);    // Sets CC 2 (bcondNotEqual will be true).
        __ z_br(Z_R14);
      }
      break;

    case C1StubId::is_instance_of_id:
      {
        // Mirror: Z_ARG1 (R2)
        // Object: Z_ARG2
        // Temps:  Z_ARG3, Z_ARG4, Z_ARG5, Z_R10, Z_R11
        // Result: Z_RET (R2)

        // Get the Klass* into Z_ARG3.
        Register klass = Z_ARG3, obj = Z_ARG2, result = Z_RET;
        Register temp0 = Z_ARG4, temp1 = Z_ARG5, temp2 = Z_R10, temp3 = Z_R11;

        __ z_ltg(klass, Address(Z_ARG1, java_lang_Class::klass_offset())); // Sets CC: is klass null?

        Label is_secondary;

        __ clear_reg(result /* Z_R2 */, true /* whole_reg */, false /* set_cc */); // Sets result = 0 (failure).

        __ z_bcr(Assembler::bcondEqual, Z_R14); // Return failure if klass is null (CC set by z_ltg above).

        __ z_ltgr(obj, obj); // Is obj null?
        __ z_bcr(Assembler::bcondEqual, Z_R14); // Return failure if obj is null.

        __ z_llgf(temp0, Address(klass, in_bytes(Klass::super_check_offset_offset())));
        __ compare32_and_branch(temp0, in_bytes(Klass::secondary_super_cache_offset()), Assembler::bcondEqual, is_secondary); // Klass is a secondary superclass.

        // Klass is a concrete class: check the fixed super_check_offset slot in obj's klass.
        __ load_klass(temp1, obj);
        __ z_cg(klass, Address(temp1, temp0));

        // result already holds 0, denoting the NotEqual (failure) case.
        __ load_on_condition_imm_32(result, 1, Assembler::bcondEqual);
        __ z_br(Z_R14);

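        // Secondary path: klass occupies no fixed slot in the subtype's
        // display, so membership must be looked up in the secondary supers
        // of obj's klass.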
        __ bind(is_secondary);

        __ load_klass(obj, obj);

        // This check is necessary because a klass never appears in its own
        // secondary supers list.
        __ z_cgr(obj, klass);
        __ load_on_condition_imm_32(result, 1, Assembler::bcondEqual);
        __ z_bcr(Assembler::bcondEqual, Z_R14);

        // Z_R10 and Z_R11 are callee-saved, so we must preserve them before any use.
        // Floating-point registers serve as spill slots here (no frame needed).
        __ z_ldgr(Z_F1, Z_R10);
        __ z_ldgr(Z_F3, Z_R11);

        __ lookup_secondary_supers_table_var(obj, klass,
                                             /*temps*/ temp0, temp1, temp2, temp3,
                                             result);

        // lookup_secondary_supers_table_var returns 0 on success and 1 on
        // failure, whereas this stub returns 0 on failure and 1 on success,
        // so the result has to be inverted.
        __ z_xilf(result, 1); // Invert the result.

        __ z_lgdr(Z_R10, Z_F1);
        __ z_lgdr(Z_R11, Z_F3);

        __ z_br(Z_R14);
      }
      break;

    case C1StubId::monitorenter_nofpu_id:
    case C1StubId::monitorenter_id:
      { // Z_R1_scratch: object
        // Z_R13       : lock address (see LIRGenerator::syncTempOpr())
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == C1StubId::monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), Z_R1_scratch, Z_R13);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case C1StubId::monitorexit_nofpu_id:
    case C1StubId::monitorexit_id:
      { // Z_R1_scratch: lock address
        // Note: really a leaf routine, but it must set up the last Java sp.
        // => Use call_RT for now (speed can be improved by
        // doing the last Java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == C1StubId::monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), Z_R1_scratch);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case C1StubId::deoptimize_id:
      { // Args: Z_R1_scratch: trap request
        __ set_info("deoptimize", dont_gc_arguments);
        Register trap_request = Z_R1_scratch;
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ load_const_optimized(Z_R1_scratch, dest);
        __ z_br(Z_R1_scratch);
      }
      break;

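    // The four patching stubs below differ only in the runtime entry they
    // pass to generate_patching: on return they re-execute the patched
    // instruction, or enter the deopt blob with re-execution if patching
    // deoptimized the nmethod (see generate_patching above).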
    case C1StubId::access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case C1StubId::load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        // We should set up register map.
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case C1StubId::load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case C1StubId::load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

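    // The disabled block below is leftover x86 code (note the rax/rsp/x87
    // instructions); it has not been ported to s390 and is presumably kept
    // only as a reference (see the TODO on the closing #endif).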
#if 0
    case C1StubId::dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc))));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case C1StubId::fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there.
        // Preserve rsi and ecx.
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // Check for NaN.
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32); // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit.
        __ movptr(rax, result_low_word);
        // Testing of high bits.
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
        __ testl(rax, 0x4100); // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
        // return max_jlong
        __ mov64(rax, CONST64(0x7fffffffffffffff));
        __ jmp(do_return);

        __ bind(return_min_jlong);
        __ mov64(rax, UCONST64(0x8000000000000000));
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
        __ xorptr(rax, rax);

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
#endif // TODO

    case C1StubId::predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");

        __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
        __ z_br(Z_R1_scratch);
      }
      break;

    default:
      {
        __ should_not_reach_here(FILE_AND_LINE, (int)id);
      }
      break;
  }
  return oop_maps;
}

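// generate_handle_exception implements the shared tail of the three
// exception-entry flavors: forward_exception assumes the registers were
// already saved in the standard places, handle_exception[_nofpu] must save
// them because anything may be live, and handle_exception_from_callee only
// needs a fresh ABI frame since all registers except Z_EXC_OOP/Z_EXC_PC are
// dead. All three variants then store the exception state into the
// JavaThread, look up the handler via exception_handler_for_pc, and jump
// to it.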
OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // Incoming parameters: Z_EXC_OOP, Z_EXC_PC.

  // Save registers if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = nullptr;
  Register reg_fp = Z_R1_scratch;

  switch (id) {
    case C1StubId::forward_exception_id: {
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm);

      // Load and clear the pending exception oop into Z_EXC_OOP.
      __ z_lg(Z_EXC_OOP, Address(Z_thread, Thread::pending_exception_offset()));
      __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), 8);

      // Different stubs forward their exceptions; they should all have similar frame layouts:
      // (a) to find their return address, and (b) for the oop_map generated above to be correct.
      assert(RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers) ==
             RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers_except_r2), "requirement");

      // Load the issuing PC (the return address for this stub).
      const int frame_size_in_bytes = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
      __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_common_abi(return_pc)));
      DEBUG_ONLY(__ z_lay(reg_fp, Address(Z_SP, frame_size_in_bytes));)

      // Make sure that the vm_results are cleared (may be unnecessary).
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_oop_offset()), sizeof(oop));
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_metadata_offset()), sizeof(Metadata*));
      break;
    }
    case C1StubId::handle_exception_nofpu_id:
    case C1StubId::handle_exception_id:
      // At this point all registers MAY be live.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id, Z_EXC_PC);
      break;
    case C1StubId::handle_exception_from_callee_id: {
      // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      __ save_return_pc(Z_EXC_PC);
      const int frame_size_in_bytes = __ push_frame_abi160(0);
      oop_map = new OopMap(frame_size_in_bytes / VMRegImpl::stack_slot_size, 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      break;
    }
    default: ShouldNotReachHere();
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC, reg_fp);
  // Verify that Z_EXC_OOP contains a valid exception.
  __ verify_not_null_oop(Z_EXC_OOP);

  // Check that fields in JavaThread for exception oop and issuing pc
  // are empty before writing to them.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save the exception oop and issuing pc into the JavaThread.
  // (The exception handler will load them from there.)
  __ z_stg(Z_EXC_OOP, Address(Z_thread, JavaThread::exception_oop_offset()));
  __ z_stg(Z_EXC_PC, Address(Z_thread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  { NearLabel ok;
    __ z_cg(Z_EXC_PC, Address(reg_fp, _z_common_abi(return_pc)));
    __ branch_optimized(Assembler::bcondEqual, ok);
    __ stop("use throwing pc as return address (has bci & oop map)");
    __ bind(ok);
  }
#endif

  // Compute the exception handler.
  // The exception oop and the throwing pc are read from the fields in JavaThread.
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Z_RET (Z_R2): handler address.
  // It will be the deopt blob if the nmethod was deoptimized while we looked
  // up the handler, regardless of whether a handler existed in the nmethod.

  // Only Z_R2 is valid at this time; all other registers have been destroyed by the runtime call.
  __ invalidate_registers(Z_R2);

  switch (id) {
    case C1StubId::forward_exception_id:
    case C1StubId::handle_exception_nofpu_id:
    case C1StubId::handle_exception_id:
      // Restore the registers that were saved at the beginning.
      __ z_lgr(Z_R1_scratch, Z_R2); // Restoring live registers kills Z_R2.
      restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id); // Also pops the frame.
      __ z_br(Z_R1_scratch);
      break;
    case C1StubId::handle_exception_from_callee_id: {
      __ pop_frame();
      __ z_br(Z_R2); // Jump to exception handler.
    }
    break;
    default: ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}