1 /*
  2  * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "asm/macroAssembler.inline.hpp"
 27 #include "c1/c1_Defs.hpp"
 28 #include "c1/c1_LIRAssembler.hpp"
 29 #include "c1/c1_MacroAssembler.hpp"
 30 #include "c1/c1_Runtime1.hpp"
 31 #include "ci/ciUtilities.hpp"
 32 #include "compiler/oopMap.hpp"
 33 #include "gc/shared/cardTable.hpp"
 34 #include "gc/shared/cardTableBarrierSet.hpp"
 35 #include "gc/shared/collectedHeap.hpp"
 36 #include "gc/shared/tlab_globals.hpp"
 37 #include "interpreter/interpreter.hpp"
 38 #include "memory/universe.hpp"
 39 #include "nativeInst_arm.hpp"
 40 #include "oops/compiledICHolder.hpp"
 41 #include "oops/oop.inline.hpp"
 42 #include "prims/jvmtiExport.hpp"
 43 #include "register_arm.hpp"
 44 #include "runtime/sharedRuntime.hpp"
 45 #include "runtime/signature.hpp"
 46 #include "runtime/vframeArray.hpp"
 47 #include "utilities/align.hpp"
 48 #include "vmreg_arm.inline.hpp"
 49 
// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.
 54 
 55 // Implementation of StubAssembler
 56 
// Calls a VM runtime entry from a C1 stub and handles the pending-exception
// protocol. Returns the code offset at which the PC was recorded, to be
// registered in the stub's oop map.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // Runtime entries take the current thread as their implicit first argument.
  mov(R0, Rthread);

  // Record the last Java frame; returns the offset of the instruction that
  // stored the PC, or -1 if set_last_Java_frame did not save a PC itself.
  int call_offset = set_last_Java_frame(SP, FP, false, Rtemp);

  call(entry);
  if (call_offset == -1) { // PC not saved
    call_offset = offset();
  }
  reset_last_Java_frame(Rtemp);

  assert(frame_size() != no_frame_size, "frame must be fixed");
  if (_stub_id != Runtime1::forward_exception_id) {
    // Load the pending exception now; it is tested below after the VM
    // results have been fetched.
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
  }

  if (oop_result1->is_valid()) {
    assert_different_registers(oop_result1, R3, Rtemp);
    get_vm_result(oop_result1, Rtemp);
  }
  if (metadata_result->is_valid()) {
    assert_different_registers(metadata_result, R3, Rtemp);
    get_vm_result_2(metadata_result, Rtemp);
  }

  // Check for pending exception
  // unpack_with_exception_in_tls path is taken through
  // Runtime1::exception_handler_for_pc
  if (_stub_id != Runtime1::forward_exception_id) {
    assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
    cmp(R3, 0);
    // Tail-jump into the forward_exception stub if an exception is pending.
    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
  } else {
#ifdef ASSERT
    // Should not have pending exception in forward_exception stub
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
    cmp(R3, 0);
    breakpoint(ne);
#endif // ASSERT
  }
  return call_offset;
}
 99 
100 
101 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
102   if (arg1 != R1) {
103     mov(R1, arg1);
104   }
105   return call_RT(oop_result1, metadata_result, entry, 1);
106 }
107 
108 
// Two-argument variant of call_RT: arguments must already be in R1 and R2.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  assert(arg1 == R1 && arg2 == R2, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 2);
}
113 
114 
// Three-argument variant of call_RT: arguments must already be in R1-R3.
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  assert(arg1 == R1 && arg2 == R2 && arg3 == R3, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 3);
}
119 
120 
121 #define __ sasm->
122 
123 // TODO: ARM - does this duplicate RegisterSaver in SharedRuntime?
124 
// Layout (in words) of the register save area built by save_live_registers().
// Offsets are relative to SP after the save; FPU registers (if any) sit
// below the integer registers, and caller-pushed stub arguments sit just
// above the save area.
enum RegisterLayout {
  fpu_save_size = pd_nof_fpu_regs_reg_alloc,
#ifndef __SOFTFP__
  D0_offset = 0,    // FPU save area starts at the bottom of the frame
#endif
  R0_offset = fpu_save_size,
  R1_offset,
  R2_offset,
  R3_offset,
  R4_offset,
  R5_offset,
  R6_offset,
#if (FP_REG_NUM != 7)
  R7_offset,        // R7 has its own slot only when it is not used as FP
#endif
  R8_offset,
  R9_offset,
  R10_offset,
#if (FP_REG_NUM != 11)
  R11_offset,       // R11 has its own slot only when it is not used as FP
#endif
  R12_offset,
  FP_offset,
  LR_offset,
  reg_save_size,    // total size of the save area, in words
  // Stub arguments pushed by the caller, just above the save area:
  arg1_offset = reg_save_size * wordSize,
  arg2_offset = (reg_save_size + 1) * wordSize
};
153 
154 
// Builds an OopMap describing the register save area laid out by
// save_live_registers(), without emitting any code.
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  sasm->set_frame_size(reg_save_size /* in words */);

  // Record saved value locations in an OopMap.
  // Locations are offsets from sp after runtime call.
  OopMap* map = new OopMap(VMRegImpl::slots_per_word * reg_save_size, 0);

  int j=0;
  // i walks the save-area slots, j the register encodings; they diverge by
  // one once the FP register (which has no slot in this range) is skipped.
  for (int i = R0_offset; i < R10_offset; i++) {
    if (j == FP_REG_NUM) {
      // skip the FP register, saved below
      j++;
    }
    map->set_callee_saved(VMRegImpl::stack2reg(i), as_Register(j)->as_VMReg());
    j++;
  }
  assert(j == R10->encoding(), "must be");
#if (FP_REG_NUM != 11)
  // add R11, if not saved as FP
  map->set_callee_saved(VMRegImpl::stack2reg(R11_offset), R11->as_VMReg());
#endif
  map->set_callee_saved(VMRegImpl::stack2reg(FP_offset), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(LR_offset), LR->as_VMReg());

  if (save_fpu_registers) {
    for (int i = 0; i < fpu_save_size; i++) {
      map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
    }
  }

  return map;
}
187 
// Emits code that pushes all live registers (and optionally the FPU
// registers) according to RegisterLayout, and returns the matching OopMap.
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  __ block_comment("save_live_registers");
  sasm->set_frame_size(reg_save_size /* in words */);

  __ push(RegisterSet(FP) | RegisterSet(LR));
  __ push(RegisterSet(R0, R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (save_fpu_registers) {
    __ fpush(FloatRegisterSet(D0, fpu_save_size / 2));
  } else {
    // Keep the frame shape identical even when FPU registers are not saved.
    __ sub(SP, SP, fpu_save_size * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}
202 
203 
// Emits code that unwinds the save area built by save_live_registers().
//   restore_R0            - if false, the R0 slot is discarded (R0 keeps a result)
//   restore_FP_LR         - if false, FP and LR are left on the stack
//   do_return             - if true, pops the saved LR slot into PC (returns)
//   restore_fpu_registers - if false, the FPU area is discarded, not popped
static void restore_live_registers(StubAssembler* sasm,
                                   bool restore_R0,
                                   bool restore_FP_LR,
                                   bool do_return,
                                   bool restore_fpu_registers = HaveVFP) {
  __ block_comment("restore_live_registers");

  if (restore_fpu_registers) {
    __ fpop(FloatRegisterSet(D0, fpu_save_size / 2));
    if (!restore_R0) {
      // Skip the R0 slot so the integer pop below starts at R1.
      __ add(SP, SP, (R1_offset - fpu_save_size) * wordSize);
    }
  } else {
    // Discard the (unpopped) FPU area, and the R0 slot too if R0 is kept.
    __ add(SP, SP, (restore_R0 ? fpu_save_size : R1_offset) * wordSize);
  }
  __ pop(RegisterSet((restore_R0 ? R0 : R1), R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (restore_FP_LR) {
    __ pop(RegisterSet(FP) | RegisterSet(do_return ? PC : LR));
  } else {
    assert (!do_return, "return without restoring FP/LR");
  }
}
226 
227 
228 static void restore_live_registers_except_R0(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
229   restore_live_registers(sasm, false, true, true, restore_fpu_registers);
230 }
231 
232 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
233   restore_live_registers(sasm, true, true, true, restore_fpu_registers);
234 }
235 
236 static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
237   restore_live_registers(sasm, true, false, false, restore_fpu_registers);
238 }
239 
240 static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
241   restore_live_registers(sasm, true, true, false, restore_fpu_registers);
242 }
243 
// StubAssembler entry point forwarding to the file-local helper above.
void StubAssembler::save_live_registers() {
  ::save_live_registers(this);
}
247 
// StubAssembler entry point forwarding to the file-local helper above.
void StubAssembler::restore_live_registers_without_return() {
  ::restore_live_registers_without_return(this);
}
251 
// Platform-dependent one-time initialization: nothing to do on ARM.
void Runtime1::initialize_pd() {
}
254 
255 
// Generates a stub that calls a throwing runtime entry (with up to two
// arguments taken from the stub's argument slots). The runtime call never
// returns here - it unwinds to an exception handler.
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (has_argument) {
    // Arguments were stored by the caller just above the register save area.
    __ ldr(R1, Address(SP, arg1_offset));
    __ ldr(R2, Address(SP, arg2_offset));
    call_offset = __ call_RT(noreg, noreg, target, R1, R2);
  } else {
    call_offset = __ call_RT(noreg, noreg, target);
  }

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DEBUG_ONLY(STOP("generate_exception_throw");)  // Should not reach here
  return oop_maps;
}
274 
275 
// JSR292 support: if the exception PC is a MethodHandle call site, SP must
// be restored from the register it was saved in (Rmh_SP_save).
static void restore_sp_for_method_handle(StubAssembler* sasm) {
  // Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
  __ cmp(Rtemp, 0);
  // Conditional move: only taken for method-handle call sites (flag != 0).
  __ mov(SP, Rmh_SP_save, ne);
}
282 
283 
// Generates the shared exception-handling stub bodies. Looks up the handler
// for the throwing pc via exception_handler_for_pc, patches it into the
// saved return address, then restores registers and jumps to the handler.
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  bool save_fpu_registers = false;

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;

  switch (id) {
  case forward_exception_id: {
    save_fpu_registers = HaveVFP;
    // Registers were already saved before entry; only an OopMap describing
    // that existing save area is needed here.
    oop_map = generate_oop_map(sasm);
    __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
    // The throwing pc is the LR slot of the register save area.
    __ ldr(Rexception_pc, Address(SP, LR_offset * wordSize));
    Register zero = __ zero_register(Rtemp);
    // Clear the pending exception now that it is in Rexception_obj.
    __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
    break;
  }
  case handle_exception_id:
    save_fpu_registers = HaveVFP;
    // fall-through
  case handle_exception_nofpu_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, save_fpu_registers);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except exception oop (R4/R19) and
    // exception pc (R5/R20) are dead.
    oop_map = save_live_registers(sasm);  // TODO it's not required to save all registers
    break;
  default:  ShouldNotReachHere();
  }

  // Publish exception oop and throwing pc for exception_handler_for_pc.
  __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));

  __ str(Rexception_pc, Address(SP, LR_offset * wordSize)); // patch throwing pc into return address

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Exception handler found
  __ str(R0, Address(SP, LR_offset * wordSize)); // patch the return address

  // Restore the registers that were saved at the beginning, remove
  // frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, save_fpu_registers);
    // Note: the restore live registers includes the jump to LR (patched to R0)
    break;
  case handle_exception_from_callee_id:
    restore_live_registers_without_return(sasm); // must not jump immediately to handler
    restore_sp_for_method_handle(sasm);
    __ ret();
    break;
  default:  ShouldNotReachHere();
  }

  DEBUG_ONLY(STOP("generate_handle_exception");)  // Should not reach here

  return oop_maps;
}
350 
351 
// Generates the stub that unwinds one frame and dispatches a pending
// exception to the caller's handler.
void Runtime1::generate_unwind_exception(StubAssembler* sasm) {
  // FP no longer used to find the frame start
  // on entry, remove_frame() has already been called (restoring FP and LR)

  // search the exception handler address of the caller (using the return address)
  __ mov(c_rarg0, Rthread);
  __ mov(Rexception_pc, LR);
  __ mov(c_rarg1, LR);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);

  // Exception oop should be still in Rexception_obj and pc in Rexception_pc
  // Jump to handler
  __ verify_not_null_oop(Rexception_obj);

  // JSR292 extension
  restore_sp_for_method_handle(sasm);

  // R0 holds the handler address returned by the leaf call above.
  __ jump(R0);
}
371 
372 
// Generates a stub that calls a code-patching runtime routine. If patching
// deoptimized the nmethod (non-zero result), control transfers to the
// deoptimization blob instead of returning.
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine, returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  __ cmp_32(R0, 0);

  restore_live_registers_except_FP_LR(sasm);
  // No deoptimization (R0 == 0): pop FP and return directly via PC.
  __ pop(RegisterSet(FP) | RegisterSet(PC), eq);

  // Deoptimization needed
  // TODO: ARM - no need to restore FP & LR because unpack_with_reexecution() stores them back
  __ pop(RegisterSet(FP) | RegisterSet(LR));

  __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);

  DEBUG_ONLY(STOP("generate_patching");)  // Should not reach here
  return oop_maps;
}
398 
399 
// Generates the code for the C1 runtime stub identified by 'id'.
// Returns the OopMapSet describing GC-safe points in the generated stub,
// or NULL for stubs that register their maps elsewhere / make no VM call.
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  OopMapSet* oop_maps = NULL;
  bool save_fpu_registers = HaveVFP;

  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // does not return on ARM
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        // In: R1 = klass to instantiate. Out: R0 = new oop.
        const Register result = R0;
        const Register klass  = R1;

        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case counter_overflow_id:
      {
        OopMap* oop_map = save_live_registers(sasm);
        // Bytecode index and method are passed in the stub argument slots.
        __ ldr(R1, Address(SP, arg1_offset));
        __ ldr(R2, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), R1, R2);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

        // In: R1 = klass, R2 = array length. Out: R0 = new array oop.
        const Register result = R0;
        const Register klass  = R1;
        const Register length = R2;

        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case new_multi_array_id:
      {
        __ set_info("new_multi_array", dont_gc_arguments);

        // R0: klass
        // R2: rank
        // SP: address of 1st dimension
        const Register result = R0;
        OopMap* map = save_live_registers(sasm);

        // Shuffle arguments into the call_RT convention (R1-R3).
        __ mov(R1, R0);
        __ add(R3, SP, arg1_offset);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_multi_array), R1, R2, R3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Do not call runtime if JVM_ACC_HAS_FINALIZER flag is not set
        __ load_klass(Rtemp, R0);
        __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));

        __ tst(Rtemp, JVM_ACC_HAS_FINALIZER);
        __ bx(LR, eq);

        // Call VM
        OopMap* map = save_live_registers(sasm);
        oop_maps = new OopMapSet();
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R0);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // (in)  R0 - sub, destroyed,
        // (in)  R1 - super, not changed
        // (out) R0 - result: 1 if check passed, 0 otherwise
        __ raw_push(R2, R3, LR);

        // Load an array of secondary_supers
        __ ldr(R2, Address(R0, Klass::secondary_supers_offset()));
        // Length goes to R3
        __ ldr_s32(R3, Address(R2, Array<Klass*>::length_offset_in_bytes()));
        __ add(R2, R2, Array<Klass*>::base_offset_in_bytes());

        // Linear scan of the secondary supers array for 'super' (R1).
        Label loop, miss;
        __ bind(loop);
        __ cbz(R3, miss);
        __ ldr(LR, Address(R2, wordSize, post_indexed));
        __ sub(R3, R3, 1);
        __ cmp(LR, R1);
        __ b(loop, ne);

        // We get here if an equal cache entry is found
        __ str(R1, Address(R0, Klass::secondary_super_cache_offset()));
        __ mov(R0, 1);
        __ raw_pop_and_ret(R2, R3);

        // A cache entry not found - return false
        __ bind(miss);
        __ mov(R0, 0);
        __ raw_pop_and_ret(R2, R3);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);
        // Object and BasicObjectLock are passed in the stub argument slots.
        const Register obj  = R1;
        const Register lock = R2;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(obj, Address(SP, arg1_offset));
        __ ldr(lock, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), obj, lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        __ set_info("monitorexit", dont_gc_arguments);
        // BasicObjectLock is passed in the stub argument slot.
        const Register lock = R1;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(lock, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        const Register trap_request = R1;
        __ ldr(trap_request, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers_without_return(sasm);
        // Transfer to the deoptimization blob rather than returning.
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, noreg);
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers_without_return(sasm);

        // Transfer to the deoptimization blob rather than returning.
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);
      }
      break;

    default:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        STOP("unimplemented entry");
      }
      break;
  }
  return oop_maps;
}
727 
728 #undef __
729 
730 #ifdef __SOFTFP__
// Maps a runtime-call target address to a printable function name for debug
// output. Covers the soft-float arithmetic/compare helpers; returns "" for
// any address that is not one of the known entries.
const char *Runtime1::pd_name_for_address(address entry) {

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, __aeabi_fadd_glibc);
  FUNCTION_CASE(entry, __aeabi_fmul);
  FUNCTION_CASE(entry, __aeabi_fsub_glibc);
  FUNCTION_CASE(entry, __aeabi_fdiv);

  // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
  FUNCTION_CASE(entry, __aeabi_dadd_glibc);
  FUNCTION_CASE(entry, __aeabi_dmul);
  FUNCTION_CASE(entry, __aeabi_dsub_glibc);
  FUNCTION_CASE(entry, __aeabi_ddiv);

  FUNCTION_CASE(entry, __aeabi_f2d);
  FUNCTION_CASE(entry, __aeabi_d2f);
  FUNCTION_CASE(entry, __aeabi_i2f);
  FUNCTION_CASE(entry, __aeabi_i2d);
  FUNCTION_CASE(entry, __aeabi_f2iz);

  FUNCTION_CASE(entry, SharedRuntime::fcmpl);
  FUNCTION_CASE(entry, SharedRuntime::fcmpg);
  FUNCTION_CASE(entry, SharedRuntime::dcmpl);
  FUNCTION_CASE(entry, SharedRuntime::dcmpg);

  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpgt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpgt);

  FUNCTION_CASE(entry, SharedRuntime::fneg);
  FUNCTION_CASE(entry, SharedRuntime::dneg);

  FUNCTION_CASE(entry, __aeabi_fcmpeq);
  FUNCTION_CASE(entry, __aeabi_fcmplt);
  FUNCTION_CASE(entry, __aeabi_fcmple);
  FUNCTION_CASE(entry, __aeabi_fcmpge);
  FUNCTION_CASE(entry, __aeabi_fcmpgt);

  FUNCTION_CASE(entry, __aeabi_dcmpeq);
  FUNCTION_CASE(entry, __aeabi_dcmplt);
  FUNCTION_CASE(entry, __aeabi_dcmple);
  FUNCTION_CASE(entry, __aeabi_dcmpge);
  FUNCTION_CASE(entry, __aeabi_dcmpgt);
#undef FUNCTION_CASE
  return "";
}
784 #else  // __SOFTFP__
// Hard-float configuration: no platform-specific helper names are resolved.
const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}
788 #endif // __SOFTFP__