/*
 * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_s390.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"
#include "registerSaver_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_s390.inline.hpp"

// Implementation of StubAssembler

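// Call a C runtime entry point with Z_thread as the implicit first argument.
// Captures the current PC into the frame anchor via set_last_Java_frame(),
// performs the call, forwards any pending exception, and fetches the
// oop/metadata results from the thread if requested. Returns the code offset
// of the call site, to be registered in the oop map.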
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");

  // We cannot trust that code generated by the C++ compiler saves R14
  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
  // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame() save
  // it into the frame anchor.
  address pc = get_PC(Z_R1_scratch);
  int call_offset = (int)(pc - addr_at(0));
  set_last_Java_frame(Z_SP, Z_R1_scratch);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = nullptr;
  align_call_far_patchable(this->pc());
  return_pc = call_c_opt(entry_point);
  assert(return_pc != nullptr, "const section overflow");

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to branch conditionally to forward_exception, but after
    // relocation the conditional branch might no longer reach its target.
    // So we branch around an unconditional jump, which is always in range.

    Label ok;
    z_bre(ok); // bcondEqual is the same as bcondZero.

    // Exception pending => forward to exception handler.

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_oop_offset()), sizeof(jlong));
    }
    if (metadata_result->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_metadata_offset()), sizeof(jlong));
    }
    if (frame_size() == no_frame_size) {
      // Pop the stub frame.
      pop_frame();
      restore_return_pc();
      load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
      z_br(Z_R1);
    } else if (_stub_id == (int)StubId::c1_forward_exception_id) {
      should_not_reach_here();
    } else {
      load_const_optimized(Z_R1, Runtime1::entry_for(StubId::c1_forward_exception_id));
      z_br(Z_R1);
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result_oop(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_metadata(metadata_result);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  assert(arg3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg3);
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

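// In non-product builds, when Verbose is set, redefine "__" to emit the
// generating source location as a block comment before each instruction,
// which makes stub disassembly easier to correlate with this file.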
#ifndef PRODUCT
#undef  __
#define __ (Verbose ? (sasm->block_comment(FILE_AND_LINE), sasm) : sasm)->
#endif // !PRODUCT

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")

static OopMap* generate_oop_map(StubAssembler* sasm) {
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::generate_oop_map(sasm, reg_set);
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true, Register return_pc = Z_R14) {
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set =
    save_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
  if (!save_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("save_live_registers_except_r2");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  RegisterSaver::RegisterSet reg_set =
    restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (!restore_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("restore_live_registers_except_r2");
  RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}

void Runtime1::initialize_pd() {
  // Nothing to do.
}

uint Runtime1::runtime_blob_current_thread_offset(frame f) {
  Unimplemented();
  return 0;
}

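// Generate code to throw an exception by calling into the runtime "target".
// When has_argument is set, the stub's arguments are expected in Z_R1_scratch
// and Z_R0_scratch and are forwarded to the runtime call. The runtime call
// does not return; control leaves through the exception-forwarding path.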
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // Incoming parameters: Z_EXC_OOP and Z_EXC_PC.
  // Keep copies in callee-saved registers during runtime call.
  const Register exception_oop_callee_saved = Z_R11;
  const Register exception_pc_callee_saved  = Z_R12;
  // Other registers used in this stub.
  const Register handler_addr = Z_R4;

  if (AbortVMOnException) {
    save_live_registers(sasm);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), Z_EXC_OOP);
    restore_live_registers(sasm);
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC);

  // Check that fields in JavaThread for exception oop and issuing pc are empty.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and pc in callee-saved registers to preserve them
  // during the runtime call.
  __ verify_not_null_oop(Z_EXC_OOP);
  __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP);
  __ lgr_if_needed(exception_pc_callee_saved, Z_EXC_PC);

  __ push_frame_abi160(0); // Runtime code needs the z_abi_160.

  // Search the exception handler address of the caller (using the return address).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Z_thread, Z_EXC_PC);
  // Z_RET (Z_R2): exception handler address of the caller.

  __ pop_frame();

  __ invalidate_registers(exception_oop_callee_saved, exception_pc_callee_saved, Z_RET);

  // Move result of call into correct register.
  __ lgr_if_needed(handler_addr, Z_RET);

  // Restore exception oop and pc to Z_EXC_OOP and Z_EXC_PC (required convention of exception handler).
  __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved);
  __ lgr_if_needed(Z_EXC_PC, exception_pc_callee_saved);

  // Verify that there is really a valid exception in Z_EXC_OOP.
  __ verify_not_null_oop(Z_EXC_OOP);

  __ z_br(handler_addr); // Jump to exception handler.
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // Re-execute the patched instruction or, if the nmethod was
  // deoptimized, return to the deoptimization handler entry that will
  // cause re-execution of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  __ z_ltr(Z_RET, Z_RET); // Set CC: return value == 0 means the nmethod was not deoptimized.

  restore_live_registers(sasm);

  __ z_bcr(Assembler::bcondZero, Z_R14);

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just returned.
  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ load_const_optimized(Z_R1_scratch, dest);
  __ z_br(Z_R1_scratch);

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubId id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Default value; overwritten for some optimized stubs that are
  // called from methods that do not use the fpu.
  bool save_fpu_registers = true;

  // Stub code and info for the different stubs.
  OopMapSet* oop_maps = nullptr;
  switch (id) {
    case StubId::c1_forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // will not return
      }
      break;

    case StubId::c1_new_instance_id:
    case StubId::c1_fast_new_instance_id:
    case StubId::c1_fast_new_instance_init_check_id:
      {
        Register klass = Z_R11; // Incoming
        Register obj   = Z_R2;  // Result

        if (id == StubId::c1_new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == StubId::c1_fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == StubId::c1_fast_new_instance_init_check_id, "bad StubId");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;

    case StubId::c1_counter_overflow_id:
      {
        // Arguments:
        //   bci    : stack param 0
        //   method : stack param 1
        //
        Register bci = Z_ARG2, method = Z_ARG3;
        OopMap* map = save_live_registers(sasm);
        // Frame size in bytes, needed to locate the stack parameters above the save area.
        const int frame_size = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
        __ z_lg(bci,    0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(method, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ z_br(Z_R14);
      }
      break;
    case StubId::c1_new_type_array_id:
    case StubId::c1_new_object_array_id:
      {
        Register length = Z_R13; // Incoming
        Register klass  = Z_R11; // Incoming
        Register obj    = Z_R2;  // Result

        if (id == StubId::c1_new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          NearLabel ok;
          Register t0 = obj;
          __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
          __ z_sra(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == StubId::c1_new_type_array_id)
                     ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_ref_value);
          __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset;
        if (id == StubId::c1_new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;

    case StubId::c1_new_multi_array_id:
      { __ set_info("new_multi_array", dont_gc_arguments);
        // Z_R3: klass
        // Z_R4: rank
        // Z_R5: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(Z_R2, noreg, CAST_FROM_FN_PTR(address, new_multi_array), Z_R3, Z_R4, Z_R5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        // Z_R2: new multi array
        __ verify_oop(Z_R2, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;

    case StubId::c1_register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Load the klass and check the has_finalizer flag.
        Register klass = Z_ARG2;
        __ load_klass(klass, Z_ARG1);
        __ z_tm(Address(klass, Klass::misc_flags_offset()), KlassFlags::_misc_has_finalizer);
        __ z_bcr(Assembler::bcondAllZero, Z_R14); // Return if bit is not set.

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), Z_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers.
        restore_live_registers(sasm);

        __ z_br(Z_R14);
      }
      break;

    case StubId::c1_throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case StubId::c1_throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;
    case StubId::c1_throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;
    case StubId::c1_throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;
    case StubId::c1_handle_exception_nofpu_id:
    case StubId::c1_handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case StubId::c1_handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case StubId::c1_unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // Note: no stubframe since we are about to leave the current
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;
    case StubId::c1_throw_array_store_exception_id:
      { __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;
    case StubId::c1_throw_class_cast_exception_id:
      { // Z_R1_scratch: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;
    case StubId::c1_throw_incompatible_class_change_error_id:
      { __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
    case StubId::c1_slow_subtype_check_id:
      {
        // Arguments:
        //   sub  : stack param 0
        //   super: stack param 1
        //   raddr: Z_R14, blown by call
        //
        // Result: condition code 0 for match (bcondEqual will be true),
        //         condition code 2 for miss (bcondNotEqual will be true)
        NearLabel miss;
        const Register Rsubklass   = Z_ARG2; // sub
        const Register Rsuperklass = Z_ARG3; // super

        // No args, but tmp registers that are killed.
        const Register Rlength    = Z_ARG4; // cache array length
        const Register Rarray_ptr = Z_ARG5; // Current value from cache array.

        if (UseCompressedOops) {
          assert(Universe::heap() != nullptr, "java heap must be initialized to generate partial_subtype_check stub");
        }

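        // check_klass_subtype_slow_path() clobbers the argument registers
        // reused as temps above, while the caller expects them to survive.
        // Spill them into a small frame here and reload them on both exit
        // paths below.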
        const int frame_size = 4*BytesPerWord + frame::z_abi_160_size;
        // Save return pc. This is not necessary, but could be helpful
        // in the case of crashes.
        __ save_return_pc();
        __ push_frame(frame_size);
        // Save registers before changing them.
        int i = 0;
        __ z_stg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");

        // Get sub and super from stack.
        __ z_lg(Rsubklass,   0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);

        __ check_klass_subtype_slow_path(Rsubklass,
                                         Rsuperklass,
                                         Rarray_ptr /* temp_reg */,
                                         Rlength /* temp2_reg */,
                                         nullptr /* L_success */,
                                         &miss /* L_failure */);

        // Match falls through here.
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ clear_reg(Z_R0_scratch); // Zero indicates a match. Set CC 0 (bcondEqual will be true).
        __ z_br(Z_R14);

        __ BIND(miss);
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ load_const_optimized(Z_R0_scratch, 1); // One indicates a miss.
        __ z_ltgr(Z_R0_scratch, Z_R0_scratch);    // Set CC 2 (bcondNotEqual will be true).
        __ z_br(Z_R14);
      }
      break;
    case StubId::c1_is_instance_of_id:
      {
        // Mirror: Z_ARG1 (Z_R2)
        // Object: Z_ARG2
        // Temps:  Z_ARG3, Z_ARG4, Z_ARG5, Z_R10, Z_R11
        // Result: Z_RET (Z_R2)

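        // Strategy: a null klass (primitive mirror) or a null object can
        // never match. If the super klass has a concrete super_check_offset,
        // a single compare against the sub klass's super path decides.
        // Otherwise, fall back to the secondary supers table lookup below.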
        // Get the Klass* into Z_ARG3.
        Register klass = Z_ARG3, obj = Z_ARG2, result = Z_RET;
        Register temp0 = Z_ARG4, temp1 = Z_ARG5, temp2 = Z_R10, temp3 = Z_R11;

        __ z_ltg(klass, Address(Z_ARG1, java_lang_Class::klass_offset())); // Sets CC zero if klass is null.

        Label is_secondary;

        __ clear_reg(result /* Z_R2 */, true /* whole_reg */, false /* set_cc */); // Sets result = 0 (failure).

        __ z_bcr(Assembler::bcondEqual, Z_R14); // Return failure; CC was set by z_ltg above.

        __ z_ltgr(obj, obj); // Is obj null?
        __ z_bcr(Assembler::bcondEqual, Z_R14);

        __ z_llgf(temp0, Address(klass, in_bytes(Klass::super_check_offset_offset())));
        __ compare32_and_branch(temp0, in_bytes(Klass::secondary_super_cache_offset()), Assembler::bcondEqual, is_secondary); // Klass is a secondary superclass.

        // Klass is a concrete class.
        __ load_klass(temp1, obj);
        __ z_cg(klass, Address(temp1, temp0));

        // result already holds 0, denoting the NotEqual case.
        __ load_on_condition_imm_32(result, 1, Assembler::bcondEqual);
        __ z_br(Z_R14);

        __ bind(is_secondary);

        __ load_klass(obj, obj);

        // This check is necessary because a klass is never in its own secondary supers list.
        __ z_cgr(obj, klass);
        __ load_on_condition_imm_32(result, 1, Assembler::bcondEqual);
        __ z_bcr(Assembler::bcondEqual, Z_R14);

        // Z_R10 and Z_R11 are callee-saved, so we must preserve them before any use.
        __ z_ldgr(Z_F1, Z_R10);
        __ z_ldgr(Z_F3, Z_R11);

        __ lookup_secondary_supers_table_var(obj, klass,
                                             /*temps*/ temp0, temp1, temp2, temp3,
                                             result);

        // lookup_secondary_supers_table_var() returns 0 on success and 1 on failure,
        // but this stub returns 0 on failure and 1 on success,
        // so we have to invert its result.
        __ z_xilf(result, 1); // Invert the result.

        __ z_lgdr(Z_R10, Z_F1);
        __ z_lgdr(Z_R11, Z_F3);

        __ z_br(Z_R14);
      }
      break;
    case StubId::c1_monitorenter_nofpu_id:
    case StubId::c1_monitorenter_id:
      { // Z_R1_scratch : object
        // Z_R13        : lock address (see LIRGenerator::syncTempOpr())
        __ set_info("monitorenter", dont_gc_arguments);

        bool save_fpu_registers = (id == StubId::c1_monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), Z_R1_scratch, Z_R13);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case StubId::c1_monitorexit_nofpu_id:
    case StubId::c1_monitorexit_id:
      { // Z_R1_scratch : lock address
        // Note: really a leaf routine, but it must set up the last Java sp
        //       => use call_RT for now (speed can be improved by
        //       doing the last Java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        bool save_fpu_registers = (id == StubId::c1_monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), Z_R1_scratch);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case StubId::c1_deoptimize_id:
      { // Args: Z_R1_scratch: trap request
        __ set_info("deoptimize", dont_gc_arguments);
        Register trap_request = Z_R1_scratch;
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ load_const_optimized(Z_R1_scratch, dest);
        __ z_br(Z_R1_scratch);
      }
      break;

    case StubId::c1_access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case StubId::c1_load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        // We should set up register map.
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case StubId::c1_load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case StubId::c1_load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
#if 0
    case StubId::c1_dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc))));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case StubId::c1_fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there.
        // Preserve rsi and rcx.
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // Check for NaN.
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32); // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit.
        __ movptr(rax, result_low_word);
        // Testing of high bits.
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
        __ testl(rax, 0x4100); // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
        // return max_jlong
        __ mov64(rax, CONST64(0x7fffffffffffffff));
        __ jmp(do_return);

        __ bind(return_min_jlong);
        __ mov64(rax, UCONST64(0x8000000000000000));
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
        __ xorptr(rax, rax);

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
#endif // TODO

    case StubId::c1_predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");

        __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
        __ z_br(Z_R1_scratch);
      }
      break;

    default:
      {
        __ should_not_reach_here(FILE_AND_LINE, (int)id);
      }
      break;
  }
  return oop_maps;
}

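// Common exception-dispatch code, entered in three flavors:
//  - c1_forward_exception_id:            registers are already saved in the
//    standard places; the pending exception is loaded from the thread.
//  - c1_handle_exception(_nofpu)_id:     all registers may be live and are
//    saved here (without FPU registers for the nofpu variant).
//  - c1_handle_exception_from_callee_id: only Z_EXC_OOP and Z_EXC_PC are
//    live; a minimal ABI frame is pushed instead of a full register save.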
OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // Incoming parameters: Z_EXC_OOP, Z_EXC_PC.

  // Save registers if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = nullptr;
  Register reg_fp = Z_R1_scratch;
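  // Note: reg_fp is only used by the debug-only checks below to verify that
  // Z_EXC_PC matches the return_pc slot of the frame being examined.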

  switch (id) {
    case StubId::c1_forward_exception_id: {
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm);

      // Load and clear the pending exception oop (into Z_EXC_OOP).
      __ z_lg(Z_EXC_OOP, Address(Z_thread, Thread::pending_exception_offset()));
      __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), 8);

      // Different stubs forward their exceptions; they should all have similar frame layouts
      // (a) so we can find their return address and (b) so the oop_map generated above is correct.
      assert(RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers) ==
             RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers_except_r2), "requirement");

      // Load issuing PC (the return address for this stub).
      const int frame_size_in_bytes = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
      __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_common_abi(return_pc)));
      DEBUG_ONLY(__ z_lay(reg_fp, Address(Z_SP, frame_size_in_bytes));)

      // Make sure that the vm_results are cleared (may be unnecessary).
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_oop_offset()), sizeof(oop));
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_metadata_offset()), sizeof(Metadata*));
      break;
    }
    case StubId::c1_handle_exception_nofpu_id:
    case StubId::c1_handle_exception_id:
      // At this point all registers MAY be live.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      oop_map = save_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id, Z_EXC_PC);
      break;
    case StubId::c1_handle_exception_from_callee_id: {
      // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      __ save_return_pc(Z_EXC_PC);
      const int frame_size_in_bytes = __ push_frame_abi160(0);
      oop_map = new OopMap(frame_size_in_bytes / VMRegImpl::stack_slot_size, 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      break;
    }
    default: ShouldNotReachHere();
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC, reg_fp);
  // Verify that Z_EXC_OOP contains a valid exception.
  __ verify_not_null_oop(Z_EXC_OOP);

  // Check that fields in JavaThread for exception oop and issuing pc
  // are empty before writing to them.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and issuing pc into JavaThread.
  // (The exception handler will load them from here.)
  __ z_stg(Z_EXC_OOP, Address(Z_thread, JavaThread::exception_oop_offset()));
  __ z_stg(Z_EXC_PC, Address(Z_thread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  { NearLabel ok;
    __ z_cg(Z_EXC_PC, Address(reg_fp, _z_common_abi(return_pc)));
    __ branch_optimized(Assembler::bcondEqual, ok);
    __ stop("use throwing pc as return address (has bci & oop map)");
    __ bind(ok);
  }
#endif

  // Compute the exception handler.
  // The exception oop and the throwing pc are read from the fields in JavaThread.
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Z_RET (Z_R2): handler address. It will be the deopt blob's entry if the
  // nmethod was deoptimized while we looked up the handler, regardless of
  // whether a handler existed in the nmethod.

  // Only Z_R2 is valid at this time; all other registers have been destroyed by the runtime call.
  __ invalidate_registers(Z_R2);

  switch (id) {
    case StubId::c1_forward_exception_id:
    case StubId::c1_handle_exception_nofpu_id:
    case StubId::c1_handle_exception_id:
      // Restore the registers that were saved at the beginning.
      __ z_lgr(Z_R1_scratch, Z_R2); // Restoring live registers kills Z_R2.
      restore_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id); // Also pops the frame.
      __ z_br(Z_R1_scratch);
      break;
    case StubId::c1_handle_exception_from_callee_id: {
      __ pop_frame();
      __ z_br(Z_R2); // Jump to exception handler.
      break;
    }
    default: ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}