1 /*
  2  * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "asm/macroAssembler.inline.hpp"
 27 #include "c1/c1_Defs.hpp"
 28 #include "c1/c1_LIRAssembler.hpp"
 29 #include "c1/c1_MacroAssembler.hpp"
 30 #include "c1/c1_Runtime1.hpp"
 31 #include "ci/ciUtilities.hpp"
 32 #include "compiler/oopMap.hpp"
 33 #include "gc/shared/cardTable.hpp"
 34 #include "gc/shared/cardTableBarrierSet.hpp"
 35 #include "gc/shared/collectedHeap.hpp"
 36 #include "gc/shared/tlab_globals.hpp"
 37 #include "interpreter/interpreter.hpp"
 38 #include "memory/universe.hpp"
 39 #include "nativeInst_arm.hpp"
 40 #include "oops/compiledICHolder.hpp"
 41 #include "oops/oop.inline.hpp"
 42 #include "prims/jvmtiExport.hpp"
 43 #include "register_arm.hpp"
 44 #include "runtime/sharedRuntime.hpp"
 45 #include "runtime/signature.hpp"
 46 #include "runtime/vframeArray.hpp"
 47 #include "utilities/align.hpp"
 48 #include "vmreg_arm.inline.hpp"
 49 
// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.
 54 
 55 // Implementation of StubAssembler
 56 
// Emits a call from a C1 stub into the VM runtime at 'entry', passing the
// current thread as the implicit first argument in R0. Sets up and tears
// down the last-Java-frame bookkeeping around the call, moves any VM
// results into the requested registers, and forwards a pending exception
// to the forward_exception stub.
// Returns the code offset to associate with this call site's oop map.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  mov(R0, Rthread);

  int call_offset = set_last_Java_frame(SP, FP, false, Rtemp);

  call(entry);
  if (call_offset == -1) { // PC not saved
    call_offset = offset();
  }
  reset_last_Java_frame(Rtemp);

  assert(frame_size() != no_frame_size, "frame must be fixed");
  if (_stub_id != Runtime1::forward_exception_id) {
    // Load the pending exception into R3 now; the result moves below must
    // not clobber it (enforced by the assert_different_registers checks).
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
  }

  if (oop_result1->is_valid()) {
    assert_different_registers(oop_result1, R3, Rtemp);
    get_vm_result(oop_result1, Rtemp);
  }
  if (metadata_result->is_valid()) {
    assert_different_registers(metadata_result, R3, Rtemp);
    get_vm_result_2(metadata_result, Rtemp);
  }

  // Check for pending exception
  // unpack_with_exception_in_tls path is taken through
  // Runtime1::exception_handler_for_pc
  if (_stub_id != Runtime1::forward_exception_id) {
    assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
    cmp(R3, 0);
    // Tail-jump into the forward_exception stub if an exception is pending.
    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
  } else {
#ifdef ASSERT
    // Should not have pending exception in forward_exception stub
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
    cmp(R3, 0);
    breakpoint(ne);
#endif // ASSERT
  }
  return call_offset;
}
 99 
100 
// One-argument runtime call: moves arg1 into R1 (the first stub argument
// register) unless it is already there, then delegates to the general form.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  if (arg1 != R1) {
    mov(R1, arg1);
  }
  return call_RT(oop_result1, metadata_result, entry, 1);
}
107 
108 
// Two-argument runtime call: unlike the one-argument form, no register
// shuffling is emitted — the arguments must already be in R1 and R2.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  assert(arg1 == R1 && arg2 == R2, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 2);
}
113 
114 
// Three-argument runtime call: the arguments must already be in R1-R3.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  assert(arg1 == R1 && arg2 == R2 && arg3 == R3, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 3);
}
119 
120 
121 #define __ sasm->
122 
123 // TODO: ARM - does this duplicate RegisterSaver in SharedRuntime?
124 
// Word offsets (from SP, growing upwards) within the register save area
// built by save_live_registers(): FPU registers (if any) at the bottom,
// then the integer registers, with FP and LR on top. arg1/arg2 address the
// first two words just above the save area (the caller's outgoing args).
enum RegisterLayout {
  fpu_save_size = pd_nof_fpu_regs_reg_alloc,
#ifndef __SOFTFP__
  D0_offset = 0,                 // FPU registers occupy [0, fpu_save_size)
#endif
  R0_offset = fpu_save_size,     // integer registers start above the FPU area
  R1_offset,
  R2_offset,
  R3_offset,
  R4_offset,
  R5_offset,
  R6_offset,
#if (FP_REG_NUM != 7)
  R7_offset,                     // R7 slot only exists when R7 is not the FP
#endif
  R8_offset,
  R9_offset,
  R10_offset,
#if (FP_REG_NUM != 11)
  R11_offset,                    // R11 slot only exists when R11 is not the FP
#endif
  R12_offset,
  FP_offset,
  LR_offset,
  reg_save_size,                 // total save area size in words
  arg1_offset = reg_save_size * wordSize,
  arg2_offset = (reg_save_size + 1) * wordSize
};
153 
154 
// Builds an OopMap describing where save_live_registers() stored each
// register, as stack-slot offsets from SP after the runtime call. Emits no
// code itself — it only records locations matching the RegisterLayout enum.
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  sasm->set_frame_size(reg_save_size /* in words */);

  // Record saved value locations in an OopMap.
  // Locations are offsets from sp after runtime call.
  OopMap* map = new OopMap(VMRegImpl::slots_per_word * reg_save_size, 0);

  // Map slots R0_offset..R10_offset-1 to registers R0..R10, skipping the
  // register that serves as FP (i tracks slots, j tracks register numbers).
  int j=0;
  for (int i = R0_offset; i < R10_offset; i++) {
    if (j == FP_REG_NUM) {
      // skip the FP register, saved below
      j++;
    }
    map->set_callee_saved(VMRegImpl::stack2reg(i), as_Register(j)->as_VMReg());
    j++;
  }
  assert(j == R10->encoding(), "must be");
#if (FP_REG_NUM != 11)
  // add R11, if not saved as FP
  map->set_callee_saved(VMRegImpl::stack2reg(R11_offset), R11->as_VMReg());
#endif
  map->set_callee_saved(VMRegImpl::stack2reg(FP_offset), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(LR_offset), LR->as_VMReg());

  if (save_fpu_registers) {
    // FPU registers live in slots [0, fpu_save_size) at the bottom.
    for (int i = 0; i < fpu_save_size; i++) {
      map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
    }
  }

  return map;
}
187 
// Emits the pushes that build the register save area described by
// RegisterLayout (FP/LR first, then the remaining integer registers, then
// the FPU registers or equivalent stack space), and returns the matching
// OopMap. Push order must stay in sync with restore_live_registers().
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  __ block_comment("save_live_registers");
  sasm->set_frame_size(reg_save_size /* in words */);

  __ push(RegisterSet(FP) | RegisterSet(LR));
  __ push(RegisterSet(R0, R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (save_fpu_registers) {
    __ fpush(FloatRegisterSet(D0, fpu_save_size / 2));
  } else {
    // Keep the frame layout identical even when FPU state is not saved.
    __ sub(SP, SP, fpu_save_size * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}
202 
203 
// Emits the pops that unwind a save_live_registers() frame. The flags
// select partial restores:
//   restore_R0  - false skips the R0 slot (R0 carries a stub result)
//   restore_FP_LR - false leaves FP/LR on the stack for the caller to pop
//   do_return   - true pops the saved LR slot into PC, returning directly
static void restore_live_registers(StubAssembler* sasm,
                                   bool restore_R0,
                                   bool restore_FP_LR,
                                   bool do_return,
                                   bool restore_fpu_registers = HaveVFP) {
  __ block_comment("restore_live_registers");

  if (restore_fpu_registers) {
    __ fpop(FloatRegisterSet(D0, fpu_save_size / 2));
    if (!restore_R0) {
      // Skip over the saved R0 slot.
      __ add(SP, SP, (R1_offset - fpu_save_size) * wordSize);
    }
  } else {
    // Skip the (unsaved) FPU area, and the R0 slot too if not restoring R0.
    __ add(SP, SP, (restore_R0 ? fpu_save_size : R1_offset) * wordSize);
  }
  __ pop(RegisterSet((restore_R0 ? R0 : R1), R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (restore_FP_LR) {
    // Popping into PC (do_return) performs the return in the same instruction.
    __ pop(RegisterSet(FP) | RegisterSet(do_return ? PC : LR));
  } else {
    assert (!do_return, "return without restoring FP/LR");
  }
}
226 
227 
// Restore everything except R0 (which holds the stub result) and return.
static void restore_live_registers_except_R0(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, false, true, true, restore_fpu_registers);
}
231 
// Restore all saved registers and return (pops saved LR slot into PC).
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, true, restore_fpu_registers);
}
235 
// Restore all registers but leave FP/LR on the stack; does not return.
static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, false, false, restore_fpu_registers);
}
239 
// Restore all registers including FP/LR, but fall through instead of
// returning (LR is restored to LR, not popped into PC).
static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, false, restore_fpu_registers);
}
243 
// Public forward to the file-static helper of the same name.
void StubAssembler::save_live_registers() {
  ::save_live_registers(this);
}
247 
// Public forward to the file-static helper of the same name.
void StubAssembler::restore_live_registers_without_return() {
  ::restore_live_registers_without_return(this);
}
251 
// No platform-dependent Runtime1 initialization is needed on ARM.
void Runtime1::initialize_pd() {
}
254 
255 
// Generates a stub that saves all registers and calls the throwing runtime
// entry 'target' (with up to two arguments reloaded from the caller's
// outgoing argument slots when has_argument is set). The runtime call
// never returns here — it unwinds to the exception handler.
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (has_argument) {
    // Arguments were passed on the stack, just above the register save area.
    __ ldr(R1, Address(SP, arg1_offset));
    __ ldr(R2, Address(SP, arg2_offset));
    call_offset = __ call_RT(noreg, noreg, target, R1, R2);
  } else {
    call_offset = __ call_RT(noreg, noreg, target);
  }

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DEBUG_ONLY(STOP("generate_exception_throw");)  // Should not reach here
  return oop_maps;
}
274 
275 
// JSR292 support: if the thread's is-method-handle-return flag is set,
// restore SP from Rmh_SP_save (conditional move on ne).
static void restore_sp_for_method_handle(StubAssembler* sasm) {
  // Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
  __ cmp(Rtemp, 0);
  __ mov(SP, Rmh_SP_save, ne);
}
282 
283 
// Common code for the exception-dispatch entry points. Depending on 'id',
// either describes an already-saved register area (forward_exception) or
// saves registers here; stores the exception oop/pc into the thread, asks
// exception_handler_for_pc for the handler address, patches that address
// into the saved return-address slot, and resumes at the handler.
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  bool save_fpu_registers = false;

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = nullptr;

  switch (id) {
  case forward_exception_id: {
    save_fpu_registers = HaveVFP;
    // Registers were already saved by the stub that jumped here; only
    // describe the existing save area.
    oop_map = generate_oop_map(sasm);
    __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
    // The throwing pc is the saved LR in the register save area.
    __ ldr(Rexception_pc, Address(SP, LR_offset * wordSize));
    Register zero = __ zero_register(Rtemp);
    __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
    break;
  }
  case handle_exception_id:
    save_fpu_registers = HaveVFP;
    // fall-through
  case handle_exception_nofpu_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, save_fpu_registers);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except exception oop (R4/R19) and
    // exception pc (R5/R20) are dead.
    oop_map = save_live_registers(sasm);  // TODO it's not required to save all registers
    break;
  default:  ShouldNotReachHere();
  }

  __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));

  __ str(Rexception_pc, Address(SP, LR_offset * wordSize)); // patch throwing pc into return address

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Exception handler found
  __ str(R0, Address(SP, LR_offset * wordSize)); // patch the return address

  // Restore the registers that were saved at the beginning, remove
  // frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, save_fpu_registers);
    // Note: the restore live registers includes the jump to LR (patched to R0)
    break;
  case handle_exception_from_callee_id:
    restore_live_registers_without_return(sasm); // must not jump immediately to handler
    restore_sp_for_method_handle(sasm);
    __ ret();
    break;
  default:  ShouldNotReachHere();
  }

  DEBUG_ONLY(STOP("generate_handle_exception");)  // Should not reach here

  return oop_maps;
}
350 
351 
// Generates the unwind_exception stub: the frame has already been removed,
// the exception oop is in Rexception_obj and the return address in LR.
// Looks up the caller's exception handler via
// SharedRuntime::exception_handler_for_return_address and jumps to it.
void Runtime1::generate_unwind_exception(StubAssembler* sasm) {

  if (AbortVMOnException) {
    save_live_registers(sasm);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), Rexception_obj);
    restore_live_registers(sasm);
  }

  // FP no longer used to find the frame start
  // on entry, remove_frame() has already been called (restoring FP and LR)

  // search the exception handler address of the caller (using the return address)
  __ mov(c_rarg0, Rthread);
  __ mov(Rexception_pc, LR);
  __ mov(c_rarg1, LR);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);

  // Exception oop should be still in Rexception_obj and pc in Rexception_pc
  // Jump to handler
  __ verify_not_null_oop(Rexception_obj);

  // JSR292 extension
  restore_sp_for_method_handle(sasm);

  // Handler address was returned in R0.
  __ jump(R0);
}
378 
379 
// Generates a code-patching stub: saves all registers, calls the runtime
// patching routine 'target', then either returns normally (if the nmethod
// survived) or tail-calls the deoptimization blob (if R0 is non-zero).
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine, returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  __ cmp_32(R0, 0);

  // Restore everything but FP/LR; the conditional pop below returns
  // directly (pop into PC) when no deoptimization is needed (R0 == 0).
  restore_live_registers_except_FP_LR(sasm);
  __ pop(RegisterSet(FP) | RegisterSet(PC), eq);

  // Deoptimization needed
  // TODO: ARM - no need to restore FP & LR because unpack_with_reexecution() stores them back
  __ pop(RegisterSet(FP) | RegisterSet(LR));

  __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);

  DEBUG_ONLY(STOP("generate_patching");)  // Should not reach here
  return oop_maps;
}
405 
406 
// Generates the machine code for the C1 runtime stub identified by 'id'.
// Returns the oop maps describing GC roots at each runtime call site made
// by the stub (nullptr for stubs that perform no such calls, e.g.
// slow_subtype_check).
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  OopMapSet* oop_maps = nullptr;
  bool save_fpu_registers = HaveVFP;

  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // does not return on ARM
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        // In:  R1 = klass;  Out: R0 = new instance oop.
        const Register result = R0;
        const Register klass  = R1;

        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case counter_overflow_id:
      {
        OopMap* oop_map = save_live_registers(sasm);
        // Reload the two stub arguments from the slots above the save area.
        __ ldr(R1, Address(SP, arg1_offset));
        __ ldr(R2, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), R1, R2);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

        // In:  R1 = klass, R2 = length;  Out: R0 = new array oop.
        const Register result = R0;
        const Register klass  = R1;
        const Register length = R2;

        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case new_multi_array_id:
      {
        __ set_info("new_multi_array", dont_gc_arguments);

        // R0: klass
        // R2: rank
        // SP: address of 1st dimension
        const Register result = R0;
        OopMap* map = save_live_registers(sasm);

        // Shuffle arguments into the call_RT convention: klass to R1, and
        // the address of the dimensions array (above the save area) to R3.
        __ mov(R1, R0);
        __ add(R3, SP, arg1_offset);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_multi_array), R1, R2, R3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Do not call runtime if JVM_ACC_HAS_FINALIZER flag is not set
        __ load_klass(Rtemp, R0);
        __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));

        __ tst(Rtemp, JVM_ACC_HAS_FINALIZER);
        __ bx(LR, eq);

        // Call VM
        OopMap* map = save_live_registers(sasm);
        oop_maps = new OopMapSet();
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R0);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        // NOTE(review): the info string says "class_cast" although this is
        // the incompatible-class-change entry — looks like a copy/paste slip
        // in the debug name; verify before changing (string is emitted into
        // the stub's debug info).
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // (in)  R0 - sub, destroyed,
        // (in)  R1 - super, not changed
        // (out) R0 - result: 1 if check passed, 0 otherwise
        __ raw_push(R2, R3, LR);

        // Load an array of secondary_supers
        __ ldr(R2, Address(R0, Klass::secondary_supers_offset()));
        // Length goes to R3
        __ ldr_s32(R3, Address(R2, Array<Klass*>::length_offset_in_bytes()));
        __ add(R2, R2, Array<Klass*>::base_offset_in_bytes());

        // Linear scan over the secondary supers array, comparing each
        // element (post-incrementing R2) against the super klass in R1.
        Label loop, miss;
        __ bind(loop);
        __ cbz(R3, miss);
        __ ldr(LR, Address(R2, wordSize, post_indexed));
        __ sub(R3, R3, 1);
        __ cmp(LR, R1);
        __ b(loop, ne);

        // We get here if an equal cache entry is found
        __ str(R1, Address(R0, Klass::secondary_super_cache_offset()));
        __ mov(R0, 1);
        __ raw_pop_and_ret(R2, R3);

        // A cache entry not found - return false
        __ bind(miss);
        __ mov(R0, 0);
        __ raw_pop_and_ret(R2, R3);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);
        const Register obj  = R1;
        const Register lock = R2;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        // Arguments were passed on the stack, above the register save area.
        __ ldr(obj, Address(SP, arg1_offset));
        __ ldr(lock, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), obj, lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        __ set_info("monitorexit", dont_gc_arguments);
        const Register lock = R1;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(lock, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        const Register trap_request = R1;
        __ ldr(trap_request, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        // Fall through into the deopt blob rather than returning.
        restore_live_registers_without_return(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, noreg);
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Fall through into the deopt blob rather than returning.
        restore_live_registers_without_return(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);
      }
      break;

    default:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        STOP("unimplemented entry");
      }
      break;
  }
  return oop_maps;
}
734 
735 #undef __
736 
737 #ifdef __SOFTFP__
// Soft-float build: maps the address of a known soft-float/compare helper
// back to its name for disassembly/debug output. Returns "" when the
// address matches none of the listed helpers.
const char *Runtime1::pd_name_for_address(address entry) {

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, __aeabi_fadd_glibc);
  FUNCTION_CASE(entry, __aeabi_fmul);
  FUNCTION_CASE(entry, __aeabi_fsub_glibc);
  FUNCTION_CASE(entry, __aeabi_fdiv);

  // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
  FUNCTION_CASE(entry, __aeabi_dadd_glibc);
  FUNCTION_CASE(entry, __aeabi_dmul);
  FUNCTION_CASE(entry, __aeabi_dsub_glibc);
  FUNCTION_CASE(entry, __aeabi_ddiv);

  FUNCTION_CASE(entry, __aeabi_f2d);
  FUNCTION_CASE(entry, __aeabi_d2f);
  FUNCTION_CASE(entry, __aeabi_i2f);
  FUNCTION_CASE(entry, __aeabi_i2d);
  FUNCTION_CASE(entry, __aeabi_f2iz);

  FUNCTION_CASE(entry, SharedRuntime::fcmpl);
  FUNCTION_CASE(entry, SharedRuntime::fcmpg);
  FUNCTION_CASE(entry, SharedRuntime::dcmpl);
  FUNCTION_CASE(entry, SharedRuntime::dcmpg);

  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpgt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpgt);

  FUNCTION_CASE(entry, SharedRuntime::fneg);
  FUNCTION_CASE(entry, SharedRuntime::dneg);

  FUNCTION_CASE(entry, __aeabi_fcmpeq);
  FUNCTION_CASE(entry, __aeabi_fcmplt);
  FUNCTION_CASE(entry, __aeabi_fcmple);
  FUNCTION_CASE(entry, __aeabi_fcmpge);
  FUNCTION_CASE(entry, __aeabi_fcmpgt);

  FUNCTION_CASE(entry, __aeabi_dcmpeq);
  FUNCTION_CASE(entry, __aeabi_dcmplt);
  FUNCTION_CASE(entry, __aeabi_dcmple);
  FUNCTION_CASE(entry, __aeabi_dcmpge);
  FUNCTION_CASE(entry, __aeabi_dcmpgt);
#undef FUNCTION_CASE
  return "";
}
791 #else  // __SOFTFP__
// Hard-float build: no soft-float helpers exist, so no address is named.
const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}
795 #endif // __SOFTFP__