1 /*
  2  * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "asm/macroAssembler.inline.hpp"
 27 #include "c1/c1_Defs.hpp"
 28 #include "c1/c1_LIRAssembler.hpp"
 29 #include "c1/c1_MacroAssembler.hpp"
 30 #include "c1/c1_Runtime1.hpp"
 31 #include "ci/ciUtilities.hpp"
 32 #include "compiler/oopMap.hpp"
 33 #include "gc/shared/cardTable.hpp"
 34 #include "gc/shared/cardTableBarrierSet.hpp"
 35 #include "gc/shared/collectedHeap.hpp"
 36 #include "gc/shared/tlab_globals.hpp"
 37 #include "interpreter/interpreter.hpp"
 38 #include "memory/universe.hpp"
 39 #include "nativeInst_arm.hpp"
 40 #include "oops/oop.inline.hpp"
 41 #include "prims/jvmtiExport.hpp"
 42 #include "register_arm.hpp"
 43 #include "runtime/sharedRuntime.hpp"
 44 #include "runtime/signature.hpp"
 45 #include "runtime/vframeArray.hpp"
 46 #include "utilities/align.hpp"
 47 #include "vmreg_arm.inline.hpp"
 48 
// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.
 53 
 54 // Implementation of StubAssembler
 55 
// Emit a call from a C1 stub into a VM runtime entry point.
// Every Runtime1 entry takes the current thread as its first argument
// (passed in R0); further arguments are expected to already be in
// R1..R3 (see the overloads below). args_size is informational only —
// it is not read in this body.
// Returns the code offset of the (return address of the) call, to be
// used as the GC-map offset for this safepoint.
// If oop_result1 / metadata_result are valid registers, they receive
// the vm_result / vm_result_2 values published by the runtime.
// Any pending exception is forwarded to the forward_exception stub,
// unless this *is* the forward_exception stub itself.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  mov(R0, Rthread);

  // set_last_Java_frame returns -1 when it did not store a PC; in that
  // case the offset just after the call is recorded instead.
  int call_offset = set_last_Java_frame(SP, FP, false, Rtemp);

  call(entry);
  if (call_offset == -1) { // PC not saved
    call_offset = offset();
  }
  reset_last_Java_frame(Rtemp);

  assert(frame_size() != no_frame_size, "frame must be fixed");
  if (_stub_id != Runtime1::forward_exception_id) {
    // Load the pending exception now; it is tested further below, after
    // the vm results have been fetched (R3 must stay live until then).
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
  }

  if (oop_result1->is_valid()) {
    assert_different_registers(oop_result1, R3, Rtemp);
    get_vm_result(oop_result1, Rtemp);
  }
  if (metadata_result->is_valid()) {
    assert_different_registers(metadata_result, R3, Rtemp);
    get_vm_result_2(metadata_result, Rtemp);
  }

  // Check for pending exception
  // unpack_with_exception_in_tls path is taken through
  // Runtime1::exception_handler_for_pc
  if (_stub_id != Runtime1::forward_exception_id) {
    assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
    cmp(R3, 0);
    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
  } else {
#ifdef ASSERT
    // Should not have pending exception in forward_exception stub
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
    cmp(R3, 0);
    breakpoint(ne);
#endif // ASSERT
  }
  return call_offset;
}
 98 
 99 
// One-argument variant: moves arg1 into R1 (the first runtime argument
// register, R0 being reserved for Rthread) if it is not there already.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  if (arg1 != R1) {
    mov(R1, arg1);
  }
  return call_RT(oop_result1, metadata_result, entry, 1);
}
106 
107 
// Two-argument variant: unlike the one-argument overload, no register
// shuffling is done — callers must pass the arguments in R1/R2 already.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  assert(arg1 == R1 && arg2 == R2, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 2);
}
112 
113 
// Three-argument variant: arguments must already be in R1/R2/R3.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  assert(arg1 == R1 && arg2 == R2 && arg3 == R3, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 3);
}
118 
119 
120 #define __ sasm->
121 
122 // TODO: ARM - does this duplicate RegisterSaver in SharedRuntime?
123 
// Layout of the register save area built by save_live_registers() and
// described by generate_oop_map(). Values up to reg_save_size are slot
// indices in words, counted from SP upward after the area is pushed:
// the FPU registers (if any) occupy the bottom fpu_save_size words,
// followed by the core registers, with FP and LR topmost. Whichever of
// R7/R11 serves as the frame pointer has no slot of its own (it is
// saved as FP). arg1_offset/arg2_offset, by contrast, are *byte*
// offsets of the first two stashed stub arguments, located just above
// the save area.
enum RegisterLayout {
  fpu_save_size = pd_nof_fpu_regs_reg_alloc,
#ifndef __SOFTFP__
  D0_offset = 0,                              // FPU registers start at the bottom
#endif
  R0_offset = fpu_save_size,                  // core registers follow the FPU area
  R1_offset,
  R2_offset,
  R3_offset,
  R4_offset,
  R5_offset,
  R6_offset,
#if (FP_REG_NUM != 7)
  R7_offset,                                  // only if R7 is not the FP register
#endif
  R8_offset,
  R9_offset,
  R10_offset,
#if (FP_REG_NUM != 11)
  R11_offset,                                 // only if R11 is not the FP register
#endif
  R12_offset,
  FP_offset,
  LR_offset,
  reg_save_size,                              // total size of the save area, in words
  arg1_offset = reg_save_size * wordSize,     // byte offset of 1st stashed argument
  arg2_offset = (reg_save_size + 1) * wordSize // byte offset of 2nd stashed argument
};
152 
153 
// Build an OopMap describing where each register value lives in the
// save area pushed by save_live_registers(). Slot indices match the
// RegisterLayout enum and are relative to SP after the runtime call.
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  sasm->set_frame_size(reg_save_size /* in words */);

  // Record saved value locations in an OopMap.
  // Locations are offsets from sp after runtime call.
  OopMap* map = new OopMap(VMRegImpl::slots_per_word * reg_save_size, 0);

  // Walk the core-register slots R0_offset..R10_offset-1; j tracks the
  // register encoding, skipping the one used as FP (it has no slot of
  // its own and is recorded under FP_offset below).
  int j=0;
  for (int i = R0_offset; i < R10_offset; i++) {
    if (j == FP_REG_NUM) {
      // skip the FP register, saved below
      j++;
    }
    map->set_callee_saved(VMRegImpl::stack2reg(i), as_Register(j)->as_VMReg());
    j++;
  }
  // R10 itself is deliberately not mapped here — presumably because it
  // holds a VM-reserved value (TODO confirm: R10 is Rthread on this port).
  assert(j == R10->encoding(), "must be");
#if (FP_REG_NUM != 11)
  // add R11, if not saved as FP
  map->set_callee_saved(VMRegImpl::stack2reg(R11_offset), R11->as_VMReg());
#endif
  map->set_callee_saved(VMRegImpl::stack2reg(FP_offset), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(LR_offset), LR->as_VMReg());

  if (save_fpu_registers) {
    // FPU slots start at index 0, below the core registers.
    for (int i = 0; i < fpu_save_size; i++) {
      map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
    }
  }

  return map;
}
186 
// Push the live registers onto the stack in the RegisterLayout order
// (FP/LR first, i.e. topmost; then the core registers; then the FPU
// registers, or an equally-sized gap when FPU state is not saved so the
// frame size stays constant). Returns the matching OopMap.
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  __ block_comment("save_live_registers");
  sasm->set_frame_size(reg_save_size /* in words */);

  __ push(RegisterSet(FP) | RegisterSet(LR));
  // altFP_7_11 covers whichever of R7/R11 is not used as FP.
  __ push(RegisterSet(R0, R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (save_fpu_registers) {
    __ fpush(FloatRegisterSet(D0, fpu_save_size / 2));
  } else {
    // Keep the layout identical even without FPU state: reserve the space.
    __ sub(SP, SP, fpu_save_size * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}
201 
202 
// Tear down the save area created by save_live_registers().
// - restore_R0:     if false, R0's slot is skipped so a result already in
//                   R0 survives the restore.
// - restore_FP_LR:  if false, FP and LR remain on the stack (and
//                   do_return must be false).
// - do_return:      if true, the saved LR slot is popped directly into PC,
//                   so the restore also performs the return.
// - restore_fpu_registers: whether FPU state was actually saved (a gap of
//                   the same size is dropped otherwise).
static void restore_live_registers(StubAssembler* sasm,
                                   bool restore_R0,
                                   bool restore_FP_LR,
                                   bool do_return,
                                   bool restore_fpu_registers = HaveVFP) {
  __ block_comment("restore_live_registers");

  if (restore_fpu_registers) {
    __ fpop(FloatRegisterSet(D0, fpu_save_size / 2));
    if (!restore_R0) {
      // Skip over R0's slot so the pop below starts at R1.
      __ add(SP, SP, (R1_offset - fpu_save_size) * wordSize);
    }
  } else {
    // Drop the reserved FPU gap, plus R0's slot when R0 is kept live.
    __ add(SP, SP, (restore_R0 ? fpu_save_size : R1_offset) * wordSize);
  }
  __ pop(RegisterSet((restore_R0 ? R0 : R1), R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (restore_FP_LR) {
    // Popping into PC (do_return case) is the return itself.
    __ pop(RegisterSet(FP) | RegisterSet(do_return ? PC : LR));
  } else {
    assert (!do_return, "return without restoring FP/LR");
  }
}
225 
226 
// Restore everything but R0 (which carries a result) and return to the caller.
static void restore_live_registers_except_R0(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, false, true, true, restore_fpu_registers);
}
230 
// Restore all saved registers and return to the caller (pop LR slot into PC).
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, true, restore_fpu_registers);
}
234 
// Restore all registers except FP/LR, which stay on the stack; no return.
static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, false, false, restore_fpu_registers);
}
238 
// Restore all registers (including FP and LR) but do not return;
// control stays in the stub (e.g. to jump to the deopt blob next).
static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, false, restore_fpu_registers);
}
242 
// Member-function entry point forwarding to the file-local helper.
void StubAssembler::save_live_registers() {
  ::save_live_registers(this);
}
246 
// Member-function entry point forwarding to the file-local helper.
void StubAssembler::restore_live_registers_without_return() {
  ::restore_live_registers_without_return(this);
}
250 
// Platform-dependent one-time initialization: nothing to do on ARM.
void Runtime1::initialize_pd() {
}
253 
// Not supported on this platform; reports "unimplemented" at runtime.
uint Runtime1::runtime_blob_current_thread_offset(frame f) {
  unimplemented();
  return 0;
}
258 
// Generate a stub that calls a runtime exception-throwing entry point.
// If has_argument, the two stub arguments were stashed just above the
// save area (see arg1_offset/arg2_offset) and are reloaded into R1/R2.
// The runtime call is expected to throw, so control never falls through
// (enforced by the debug-only STOP).
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (has_argument) {
    __ ldr(R1, Address(SP, arg1_offset));
    __ ldr(R2, Address(SP, arg2_offset));
    call_offset = __ call_RT(noreg, noreg, target, R1, R2);
  } else {
    call_offset = __ call_RT(noreg, noreg, target);
  }

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DEBUG_ONLY(STOP("generate_exception_throw");)  // Should not reach here
  return oop_maps;
}
277 
278 
// JSR292 support: when returning to a MethodHandle call site, SP must be
// restored from the register where it was stashed (Rmh_SP_save); the
// thread-local flag tells us whether the return site is such a call site.
static void restore_sp_for_method_handle(StubAssembler* sasm) {
  // Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
  __ cmp(Rtemp, 0);
  __ mov(SP, Rmh_SP_save, ne);  // conditional move, only on the MH path
}
285 
286 
// Common code for the exception-handling stubs. Saves (or just maps)
// the registers, stores the exception oop/pc into the thread, patches
// the throwing pc into the frame's return-address slot, asks the
// runtime for the handler address, patches that address in as the
// return address, and restores/returns — so the "return" lands in the
// exception handler.
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  bool save_fpu_registers = false;

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = nullptr;

  switch (id) {
  case forward_exception_id: {
    // Registers were already saved by the caller of this stub; only the
    // oop map describing that layout is needed here.
    save_fpu_registers = HaveVFP;
    oop_map = generate_oop_map(sasm);
    __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
    __ ldr(Rexception_pc, Address(SP, LR_offset * wordSize));
    // Clear the pending exception now that it has been claimed.
    Register zero = __ zero_register(Rtemp);
    __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
    break;
  }
  case handle_exception_id:
    save_fpu_registers = HaveVFP;
    // fall-through
  case handle_exception_nofpu_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, save_fpu_registers);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except the exception oop
    // (Rexception_obj) and exception pc (Rexception_pc) are dead.
    oop_map = save_live_registers(sasm);  // TODO it's not required to save all registers
    break;
  default:  ShouldNotReachHere();
  }

  // Publish the exception state to the thread for the runtime call.
  __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));

  __ str(Rexception_pc, Address(SP, LR_offset * wordSize)); // patch throwing pc into return address

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Exception handler found
  __ str(R0, Address(SP, LR_offset * wordSize)); // patch the return address

  // Restore the registers that were saved at the beginning, remove
  // frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, save_fpu_registers);
    // Note: the restore live registers includes the jump to LR (patched to R0)
    break;
  case handle_exception_from_callee_id:
    restore_live_registers_without_return(sasm); // must not jump immediately to handler
    restore_sp_for_method_handle(sasm);
    __ ret();
    break;
  default:  ShouldNotReachHere();
  }

  DEBUG_ONLY(STOP("generate_handle_exception");)  // Should not reach here

  return oop_maps;
}
353 
354 
// Generate the unwind-exception stub: the current frame has already been
// removed; look up the caller's exception handler (keyed by the return
// address in LR) via a leaf call, then jump to it with the exception oop
// in Rexception_obj and the pc in Rexception_pc.
void Runtime1::generate_unwind_exception(StubAssembler* sasm) {

  if (AbortVMOnException) {
    // Diagnostic mode: report the exception before unwinding.
    save_live_registers(sasm);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), Rexception_obj);
    restore_live_registers(sasm);
  }

  // FP no longer used to find the frame start
  // on entry, remove_frame() has already been called (restoring FP and LR)

  // search the exception handler address of the caller (using the return address)
  __ mov(c_rarg0, Rthread);
  __ mov(Rexception_pc, LR);  // keep the throwing pc for the handler
  __ mov(c_rarg1, LR);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);

  // Exception oop should be still in Rexception_obj and pc in Rexception_pc
  // Jump to handler
  __ verify_not_null_oop(Rexception_obj);

  // JSR292 extension
  restore_sp_for_method_handle(sasm);

  __ jump(R0);  // handler address returned by the leaf call
}
381 
382 
// Generate a code-patching stub: call the runtime patching routine; if
// it returns zero (nmethod still valid) restore and return normally,
// otherwise fall through and jump to the deoptimization blob to
// re-execute the patched instruction in the interpreter.
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine, returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  __ cmp_32(R0, 0);

  // Restore everything but FP/LR, then conditionally pop FP and return
  // (pop into PC) on the no-deopt path.
  restore_live_registers_except_FP_LR(sasm);
  __ pop(RegisterSet(FP) | RegisterSet(PC), eq);

  // Deoptimization needed
  // TODO: ARM - no need to restore FP & LR because unpack_with_reexecution() stores them back
  __ pop(RegisterSet(FP) | RegisterSet(LR));

  __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);

  DEBUG_ONLY(STOP("generate_patching");)  // Should not reach here
  return oop_maps;
}
408 
409 
// Generate the code for one C1 runtime stub, dispatching on the stub id.
// The common pattern is: save the live registers, call into the VM
// runtime (recording a GC map at the call offset), restore the
// registers. Returns the stub's oop maps, or null for stubs that make
// no runtime call (e.g. slow_subtype_check).
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  OopMapSet* oop_maps = nullptr;
  bool save_fpu_registers = HaveVFP;

  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // does not return on ARM
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        // All three variants share the same slow path here.
        const Register result = R0;
        const Register klass  = R1;

        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        // Keep the freshly allocated oop in R0.
        restore_live_registers_except_R0(sasm);
      }
      break;

    case counter_overflow_id:
      {
        OopMap* oop_map = save_live_registers(sasm);
        // Reload the two stashed stub arguments (bci, method) from above
        // the save area.
        __ ldr(R1, Address(SP, arg1_offset));
        __ ldr(R2, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), R1, R2);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

        const Register result = R0;
        const Register klass  = R1;
        const Register length = R2;

        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        // Keep the new array oop in R0.
        restore_live_registers_except_R0(sasm);
      }
      break;

    case new_multi_array_id:
      {
        __ set_info("new_multi_array", dont_gc_arguments);

        // R0: klass
        // R2: rank
        // SP: address of 1st dimension
        const Register result = R0;
        OopMap* map = save_live_registers(sasm);

        // Shuffle args into the runtime calling convention: klass to R1,
        // pointer to the dimension array (stashed above the save area) to R3.
        __ mov(R1, R0);
        __ add(R3, SP, arg1_offset);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_multi_array), R1, R2, R3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Do not call runtime if JVM_ACC_HAS_FINALIZER flag is not set
        __ load_klass(Rtemp, R0);
        __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));

        __ tst(Rtemp, JVM_ACC_HAS_FINALIZER);
        __ bx(LR, eq);  // fast exit: nothing to register

        // Call VM
        OopMap* map = save_live_registers(sasm);
        oop_maps = new OopMapSet();
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R0);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        // NOTE(review): info string says "class_cast" while this is the
        // incompatible_class_change stub — looks like a historical typo,
        // but the string is runtime-visible, so verify before changing.
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Pure computation, no runtime call — walks the secondary-supers
        // array of the sub klass looking for the super klass, and caches
        // a hit in secondary_super_cache.
        // (in)  R0 - sub, destroyed,
        // (in)  R1 - super, not changed
        // (out) R0 - result: 1 if check passed, 0 otherwise
        __ raw_push(R2, R3, LR);

        // Load an array of secondary_supers
        __ ldr(R2, Address(R0, Klass::secondary_supers_offset()));
        // Length goes to R3
        __ ldr_s32(R3, Address(R2, Array<Klass*>::length_offset_in_bytes()));
        __ add(R2, R2, Array<Klass*>::base_offset_in_bytes());

        Label loop, miss;
        __ bind(loop);
        __ cbz(R3, miss);
        __ ldr(LR, Address(R2, wordSize, post_indexed));
        __ sub(R3, R3, 1);
        __ cmp(LR, R1);
        __ b(loop, ne);

        // We get here if an equal cache entry is found
        __ str(R1, Address(R0, Klass::secondary_super_cache_offset()));
        __ mov(R0, 1);
        __ raw_pop_and_ret(R2, R3);

        // A cache entry not found - return false
        __ bind(miss);
        __ mov(R0, 0);
        __ raw_pop_and_ret(R2, R3);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);
        const Register obj  = R1;
        const Register lock = R2;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        // Stub arguments (object, BasicObjectLock) were stashed above the
        // save area.
        __ ldr(obj, Address(SP, arg1_offset));
        __ ldr(lock, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), obj, lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        __ set_info("monitorexit", dont_gc_arguments);
        const Register lock = R1;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(lock, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        const Register trap_request = R1;
        __ ldr(trap_request, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        // Restore but don't return — transfer to the deopt blob instead.
        restore_live_registers_without_return(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, noreg);
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Restore but don't return — transfer to the deopt blob instead.
        restore_live_registers_without_return(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);
      }
      break;

    default:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        STOP("unimplemented entry");
      }
      break;
  }
  return oop_maps;
}
737 
738 #undef __
739 
740 #ifdef __SOFTFP__
// Soft-float build: map the entry address of a known AEABI / shared
// runtime floating-point helper to a printable name (used for debug
// output of runtime calls). Returns "" when the address is not one of
// the known helpers.
const char *Runtime1::pd_name_for_address(address entry) {

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, __aeabi_fadd_glibc);
  FUNCTION_CASE(entry, __aeabi_fmul);
  FUNCTION_CASE(entry, __aeabi_fsub_glibc);
  FUNCTION_CASE(entry, __aeabi_fdiv);

  // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
  FUNCTION_CASE(entry, __aeabi_dadd_glibc);
  FUNCTION_CASE(entry, __aeabi_dmul);
  FUNCTION_CASE(entry, __aeabi_dsub_glibc);
  FUNCTION_CASE(entry, __aeabi_ddiv);

  // Conversions between float/double/int.
  FUNCTION_CASE(entry, __aeabi_f2d);
  FUNCTION_CASE(entry, __aeabi_d2f);
  FUNCTION_CASE(entry, __aeabi_i2f);
  FUNCTION_CASE(entry, __aeabi_i2d);
  FUNCTION_CASE(entry, __aeabi_f2iz);

  // Java-semantics comparisons from SharedRuntime.
  FUNCTION_CASE(entry, SharedRuntime::fcmpl);
  FUNCTION_CASE(entry, SharedRuntime::fcmpg);
  FUNCTION_CASE(entry, SharedRuntime::dcmpl);
  FUNCTION_CASE(entry, SharedRuntime::dcmpg);

  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpgt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpgt);

  FUNCTION_CASE(entry, SharedRuntime::fneg);
  FUNCTION_CASE(entry, SharedRuntime::dneg);

  // AEABI comparison helpers.
  FUNCTION_CASE(entry, __aeabi_fcmpeq);
  FUNCTION_CASE(entry, __aeabi_fcmplt);
  FUNCTION_CASE(entry, __aeabi_fcmple);
  FUNCTION_CASE(entry, __aeabi_fcmpge);
  FUNCTION_CASE(entry, __aeabi_fcmpgt);

  FUNCTION_CASE(entry, __aeabi_dcmpeq);
  FUNCTION_CASE(entry, __aeabi_dcmplt);
  FUNCTION_CASE(entry, __aeabi_dcmple);
  FUNCTION_CASE(entry, __aeabi_dcmpge);
  FUNCTION_CASE(entry, __aeabi_dcmpgt);
#undef FUNCTION_CASE
  return "";  // unknown entry
}
794 #else  // __SOFTFP__
795 const char *Runtime1::pd_name_for_address(address entry) {
796   return "<unknown function>";
797 }
798 #endif // __SOFTFP__