/*
 * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_s390.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"
#include "registerSaver_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_s390.inline.hpp"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");

  // We cannot trust that code generated by the C++ compiler saves R14
  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
  // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame() save
  // it into the frame anchor.
  address pc = get_PC(Z_R1_scratch);
  int call_offset = (int)(pc - addr_at(0));
  set_last_Java_frame(Z_SP, Z_R1_scratch);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = nullptr;
  align_call_far_patchable(this->pc());
  return_pc = call_c_opt(entry_point);
  assert(return_pc != nullptr, "const section overflow");

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to be a conditional jump to forward_exception, but after
    // relocation the conditional branch might no longer reach its target.
    // So we branch around the forwarding code instead; the short conditional
    // branch to "ok" always reaches.

    Label ok;
    z_bre(ok); // bcondEqual is the same as bcondZero.

    // Exception pending => forward to exception handler.
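    //
    // A sketch of the three outcomes below, as read from the code:
    //  - frame_size() == no_frame_size (no frame size was recorded for this
    //    stub): pop the frame and restore the return pc manually, then branch
    //    to the shared StubRoutines::forward_exception_entry.
    //  - the forward_exception stub itself must never arrive here with yet
    //    another pending exception, hence should_not_reach_here().
    //  - any other stub: leave the frame in place and branch to Runtime1's
    //    own forward_exception stub, which expects the registers saved in the
    //    standard frame layout (see generate_handle_exception below).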

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(jlong));
    }
    if (metadata_result->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(jlong));
    }
    if (frame_size() == no_frame_size) {
      // Pop the stub frame.
      pop_frame();
      restore_return_pc();
      load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
      z_br(Z_R1);
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      load_const_optimized(Z_R1, Runtime1::entry_for(Runtime1::forward_exception_id));
      z_br(Z_R1);
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  assert(arg3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg3);
  return call_RT(oop_result1, metadata_result, entry, 3);
}
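
// A minimal usage sketch for call_RT (mirroring the new_instance_id case in
// generate_code_for below): a stub saves the live registers, performs the
// runtime call, records a GC map at the returned call offset, and restores
// the registers afterwards.
//
//   OopMap* map = save_live_registers_except_r2(sasm);
//   int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps = new OopMapSet();
//   oop_maps->add_gc_map(call_offset, map);
//   restore_live_registers_except_r2(sasm);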

// Implementation of Runtime1

#define __ sasm->

#ifndef PRODUCT
#undef  __
#define __ (Verbose ? (sasm->block_comment(FILE_AND_LINE), sasm) : sasm)->
#endif // !PRODUCT

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")

static OopMap* generate_oop_map(StubAssembler* sasm) {
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::generate_oop_map(sasm, reg_set);
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true, Register return_pc = Z_R14) {
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set =
    save_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
  if (!save_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("save_live_registers_except_r2");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  RegisterSaver::RegisterSet reg_set =
    restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (!restore_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("restore_live_registers_except_r2");
  RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}

void Runtime1::initialize_pd() {
  // Nothing to do.
}

uint Runtime1::runtime_blob_current_thread_offset(frame f) {
  Unimplemented();
  return 0;
}

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}
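
// Note: generate_exception_throw is shared by the throw_* cases in
// generate_code_for below. With has_argument == true, the throwing arguments
// are expected in Z_R1_scratch and Z_R0_scratch and are passed to the runtime
// target via the two-argument call_RT variant above; otherwise the target
// only receives the thread argument.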

void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // Incoming parameters: Z_EXC_OOP and Z_EXC_PC.
  // Keep copies in callee-saved registers during runtime call.
  const Register exception_oop_callee_saved = Z_R11;
  const Register exception_pc_callee_saved  = Z_R12;
  // Other registers used in this stub.
  const Register handler_addr = Z_R4;

  if (AbortVMOnException) {
    save_live_registers(sasm);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), Z_EXC_OOP);
    restore_live_registers(sasm);
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC);

  // Check that fields in JavaThread for exception oop and issuing pc are set.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and pc in callee-saved registers to preserve them
  // during the runtime call.
  __ verify_not_null_oop(Z_EXC_OOP);
  __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP);
  __ lgr_if_needed(exception_pc_callee_saved, Z_EXC_PC);

  __ push_frame_abi160(0); // Runtime code needs the z_abi_160.

  // Search the exception handler address of the caller (using the return address).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Z_thread, Z_EXC_PC);
  // Z_RET (Z_R2): exception handler address of the caller.

  __ pop_frame();

  __ invalidate_registers(exception_oop_callee_saved, exception_pc_callee_saved, Z_RET);

  // Move result of call into correct register.
  __ lgr_if_needed(handler_addr, Z_RET);

  // Restore exception oop and pc to Z_EXC_OOP and Z_EXC_PC (required convention of exception handler).
  __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved);
  __ lgr_if_needed(Z_EXC_PC, exception_pc_callee_saved);

  // Verify that there is really a valid exception in Z_EXC_OOP.
  __ verify_not_null_oop(Z_EXC_OOP);

  __ z_br(handler_addr); // Jump to exception handler.
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; it returns non-zero if the nmethod got deoptimized.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // Re-execute the patched instruction or, if the nmethod was
  // deoptimized, return to the deoptimization handler entry that will
  // cause re-execution of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  __ z_ltr(Z_RET, Z_RET); // Set CC: zero iff return value == 0 (no deoptimization).

  restore_live_registers(sasm);

  __ z_bcr(Assembler::bcondZero, Z_R14);

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we would deopt as if any call we patched had just
  // returned.
  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ load_const_optimized(Z_R1_scratch, dest);
  __ z_br(Z_R1_scratch);

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // For better readability.
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Default value; overwritten for some optimized stubs that are
  // called from methods that do not use the fpu.
  bool save_fpu_registers = true;

  // Stub code and info for the different stubs.
  OopMapSet* oop_maps = nullptr;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // will not return
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = Z_R11; // Incoming
        Register obj   = Z_R2;  // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;
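
    // Note for the allocation paths above and below: the new oop is returned
    // in Z_R2, which is why the save/restore helpers deliberately exclude
    // Z_R2; restoring all registers would clobber the allocation result.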

    case counter_overflow_id:
      {
        // Arguments:
        //   bci    : stack param 0
        //   method : stack param 1
        Register bci = Z_ARG2, method = Z_ARG3;
        OopMap* map = save_live_registers(sasm);
        // Frame size in bytes.
        const int frame_size = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
        __ z_lg(bci,    0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(method, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ z_br(Z_R14);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = Z_R13; // Incoming
        Register klass  = Z_R11; // Incoming
        Register obj    = Z_R2;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          NearLabel ok;
          Register t0 = obj;
          __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
          __ z_sra(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;

    case new_multi_array_id:
      { __ set_info("new_multi_array", dont_gc_arguments);
        // Z_R3: klass
        // Z_R4: rank
        // Z_R5: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(Z_R2, noreg, CAST_FROM_FN_PTR(address, new_multi_array), Z_R3, Z_R4, Z_R5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        // Z_R2: new multi array
        __ verify_oop(Z_R2, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;
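
    // The next stub first tests the has-finalizer bit and returns to the
    // caller without building a frame in the common no-finalizer case; only
    // the slow path below it makes a frame and calls into the runtime.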

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Load the klass and check the has-finalizer flag.
        Register klass = Z_ARG2;
        __ load_klass(klass, Z_ARG1);
        __ testbit(Address(klass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
        __ z_bcr(Assembler::bcondAllZero, Z_R14); // Return if bit is not set.

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), Z_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers.
        restore_live_registers(sasm);

        __ z_br(Z_R14);
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // Note: no stubframe since we are about to leave the current
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { // Z_R1_scratch: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
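
    // Caller-side sketch for the stub below (hypothetical code, not emitted
    // here): a compiled caller stores sub and super klass as stack parameters,
    // calls the stub, and branches on the condition code it sets.
    //
    //   __ load_const_optimized(Z_R1, Runtime1::entry_for(Runtime1::slow_subtype_check_id));
    //   __ z_basr(Z_R14, Z_R1);                                  // Blows Z_R14.
    //   __ branch_optimized(Assembler::bcondEqual, is_subtype);  // CC 0 => match.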

    case slow_subtype_check_id:
      {
        // Arguments:
        //   sub  : stack param 0
        //   super: stack param 1
        //   raddr: Z_R14, blown by call
        //
        // Result: condition code 0 for match (bcondEqual will be true),
        //         condition code 2 for miss  (bcondNotEqual will be true)
        NearLabel miss;
        const Register Rsubklass   = Z_ARG2; // sub
        const Register Rsuperklass = Z_ARG3; // super

        // No args, but tmp registers that are killed.
        const Register Rlength    = Z_ARG4; // cache array length
        const Register Rarray_ptr = Z_ARG5; // Current value from cache array.

        if (UseCompressedOops) {
          assert(Universe::heap() != nullptr, "java heap must be initialized to generate partial_subtype_check stub");
        }

        const int frame_size = 4*BytesPerWord + frame::z_abi_160_size;
        // Save return pc. This is not necessary, but could be helpful
        // in the case of crashes.
        __ save_return_pc();
        __ push_frame(frame_size);
        // Save registers before changing them.
        int i = 0;
        __ z_stg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");

        // Get sub and super from stack.
        __ z_lg(Rsubklass,   0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);

        __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, nullptr, &miss);

        // Match falls through here.
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ clear_reg(Z_R0_scratch); // Zero indicates a match. Set CC 0 (bcondEqual will be true).
        __ z_br(Z_R14);

        __ BIND(miss);
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ load_const_optimized(Z_R0_scratch, 1); // One indicates a miss.
        __ z_ltgr(Z_R0_scratch, Z_R0_scratch);    // Set CC 2 (bcondNotEqual will be true).
        __ z_br(Z_R14);
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // Z_R1_scratch: object
        // Z_R13       : lock address (see LIRGenerator::syncTempOpr())
        __ set_info("monitorenter", dont_gc_arguments);

        bool save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), Z_R1_scratch, Z_R13);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;
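
    // The *_nofpu ids above and below are selected for callers known not to
    // use the fpu (see the save_fpu_registers comment at the top of this
    // function); skipping the fpu state makes the save/restore cheaper.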

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // Z_R1_scratch: lock address
        // Note: really a leaf routine, but we must set up the last Java sp
        // => use call_RT for now (speed can be improved by
        // doing the last Java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        bool save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), Z_R1_scratch);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case deoptimize_id:
      { // Args: Z_R1_scratch: trap request
        __ set_info("deoptimize", dont_gc_arguments);
        Register trap_request = Z_R1_scratch;
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ load_const_optimized(Z_R1_scratch, dest);
        __ z_br(Z_R1_scratch);
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        // We should set up register map.
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
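
// The two disabled cases below (dtrace_object_alloc_id, fpu2long_stub_id) are
// x86 code (note rax/rsp); presumably kept under "#if 0" as a template for a
// future port, hence the TODO on the closing #endif.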

#if 0
    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc))));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there.
        // Preserve rsi, ecx.
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // Check for NaN.
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32); // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit.
        __ movptr(rax, result_low_word);
        // Testing of high bits.
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
        __ testl(rax, 0x4100); // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
        // return max_jlong
        __ mov64(rax, CONST64(0x7fffffffffffffff));
        __ jmp(do_return);

        __ bind(return_min_jlong);
        __ mov64(rax, UCONST64(0x8000000000000000));
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
        __ xorptr(rax, rax);

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
#endif // TODO

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");

        __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
        __ z_br(Z_R1_scratch);
      }
      break;

    default:
      {
        __ should_not_reach_here(FILE_AND_LINE, id);
      }
      break;
  }
  return oop_maps;
}

OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // Incoming parameters: Z_EXC_OOP, Z_EXC_PC.

  // Save registers if required.
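
  // A summary of the three entry modes handled by the switch below:
  //  - forward_exception_id: the registers were already saved by the stub
  //    that branched here; only an oop map is generated, and the pending
  //    exception is fetched from the thread.
  //  - handle_exception_id / handle_exception_nofpu_id: all registers may be
  //    live and are saved here (the nofpu variant skips the fpu state).
  //  - handle_exception_from_callee_id: only Z_EXC_OOP and Z_EXC_PC are live;
  //    a plain ABI frame is sufficient.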
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = nullptr;
  Register reg_fp = Z_R1_scratch;

  switch (id) {
    case forward_exception_id: {
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm);

      // Load the pending exception oop into Z_EXC_OOP and clear the field in the thread.
      __ z_lg(Z_EXC_OOP, Address(Z_thread, Thread::pending_exception_offset()));
      __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), 8);

      // Different stubs forward their exceptions; they should all have similar frame layouts
      // (a) to find their return address, (b) for a correct oop_map generated above.
      assert(RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers) ==
             RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers_except_r2), "requirement");

      // Load issuing PC (the return address for this stub).
      const int frame_size_in_bytes = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
      __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_common_abi(return_pc)));
      DEBUG_ONLY(__ z_lay(reg_fp, Address(Z_SP, frame_size_in_bytes));)

      // Make sure that the vm_results are cleared (may be unnecessary).
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(oop));
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(Metadata*));
      break;
    }
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Z_EXC_PC);
      break;
    case handle_exception_from_callee_id: {
      // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      __ save_return_pc(Z_EXC_PC);
      const int frame_size_in_bytes = __ push_frame_abi160(0);
      oop_map = new OopMap(frame_size_in_bytes / VMRegImpl::stack_slot_size, 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      break;
    }
    default: ShouldNotReachHere();
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC, reg_fp);
  // Verify that Z_EXC_OOP contains a valid exception.
  __ verify_not_null_oop(Z_EXC_OOP);

  // Check that fields in JavaThread for exception oop and issuing pc
  // are empty before writing to them.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and issuing pc into JavaThread.
  // (The exception handler will load them from here.)
  __ z_stg(Z_EXC_OOP, Address(Z_thread, JavaThread::exception_oop_offset()));
  __ z_stg(Z_EXC_PC, Address(Z_thread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  { NearLabel ok;
    __ z_cg(Z_EXC_PC, Address(reg_fp, _z_common_abi(return_pc)));
    __ branch_optimized(Assembler::bcondEqual, ok);
    __ stop("use throwing pc as return address (has bci & oop map)");
    __ bind(ok);
  }
#endif

  // Compute the exception handler.
  // The exception oop and the throwing pc are read from the fields in JavaThread.
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Z_RET (Z_R2): handler address.
  // It will be the deopt blob if the nmethod was deoptimized while we looked
  // up the handler, regardless of whether a handler exists in the nmethod.

  // Only Z_R2 is valid at this time; all other registers have been destroyed by the runtime call.
  __ invalidate_registers(Z_R2);

  switch(id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // Restore the registers that were saved at the beginning.
      __ z_lgr(Z_R1_scratch, Z_R2); // Restoring live registers kills Z_R2.
      restore_live_registers(sasm, id != handle_exception_nofpu_id); // Also pops the frame.
      __ z_br(Z_R1_scratch);
      break;
    case handle_exception_from_callee_id: {
      __ pop_frame();
      __ z_br(Z_R2); // Jump to exception handler.
    }
    break;
    default: ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}