/*
 * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_riscv.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_riscv.inline.hpp"


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, int args_size) {
  // setup registers
  assert(!(oop_result->is_valid() || metadata_result->is_valid()) || oop_result != metadata_result,
         "registers must be different");
  assert(oop_result != xthread && metadata_result != xthread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;

  mv(c_rarg0, xthread);
  set_num_rt_args(0); // Nothing on stack

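  // The retaddr label is bound immediately after the runtime call below, so
  // the PC recorded by set_last_Java_frame and the call_offset returned for
  // the oop map both refer to this call's return address.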
  Label retaddr;
  set_last_Java_frame(sp, fp, retaddr, t0);

  // do the call
  rt_call(entry);
  bind(retaddr);
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  push_reg(x10, sp);
  { Label L;
    get_thread(x10);
    beq(xthread, x10, L);
    stop("StubAssembler::call_RT: xthread not callee saved?");
    bind(L);
  }
  pop_reg(x10, sp);
#endif
  reset_last_Java_frame(true);

  // check for pending exceptions
  { Label L;
    // check for pending exceptions (java_thread is set upon return)
    ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
    beqz(t0, L);
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result->is_valid()) {
      sd(zr, Address(xthread, JavaThread::vm_result_offset()));
    }
    if (metadata_result->is_valid()) {
      sd(zr, Address(xthread, JavaThread::vm_result_2_offset()));
    }
    if (frame_size() == no_frame_size) {
      leave();
      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, xthread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, xthread);
  }
  return call_offset;
}

int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, Register arg1) {
  mv(c_rarg1, arg1);
  return call_RT(oop_result, metadata_result, entry, 1);
}

int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, Register arg1, Register arg2) {
  const int arg_num = 2;
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
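      // arg1 and arg2 each sit in the other's destination register, so a
      // plain mv in either order would clobber one of them; the three-XOR
      // swap exchanges them in place without needing a scratch register.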
      xorr(arg1, arg1, arg2);
      xorr(arg2, arg1, arg2);
      xorr(arg1, arg1, arg2);
    } else {
      mv(c_rarg2, arg2);
      mv(c_rarg1, arg1);
    }
  } else {
    mv(c_rarg1, arg1);
    mv(c_rarg2, arg2);
  }
  return call_RT(oop_result, metadata_result, entry, arg_num);
}

int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  const int arg_num = 3;
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    const int arg1_sp_offset = 0;
    const int arg2_sp_offset = 1;
    const int arg3_sp_offset = 2;
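    // Stage all three arguments through the stack so that no mv can clobber
    // a source register before it has been read. The extra (arg_num + 1)th
    // word is presumably there to keep sp 16-byte aligned, as the RISC-V
    // calling convention requires.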
    addi(sp, sp, -(arg_num + 1) * wordSize);
    sd(arg1, Address(sp, arg1_sp_offset * wordSize));
    sd(arg2, Address(sp, arg2_sp_offset * wordSize));
    sd(arg3, Address(sp, arg3_sp_offset * wordSize));

    ld(c_rarg1, Address(sp, arg1_sp_offset * wordSize));
    ld(c_rarg2, Address(sp, arg2_sp_offset * wordSize));
    ld(c_rarg3, Address(sp, arg3_sp_offset * wordSize));
    addi(sp, sp, (arg_num + 1) * wordSize);
  } else {
    mv(c_rarg1, arg1);
    mv(c_rarg2, arg2);
    mv(c_rarg3, arg3);
  }
  return call_RT(oop_result, metadata_result, entry, arg_num);
}

enum return_state_t {
  does_not_return, requires_return
};

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;
  bool _return_state;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state=requires_return);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};

void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
  set_info(name, must_gc_arguments);
  enter();
}

void StubAssembler::epilogue() {
  leave();
  ret();
}

#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state) {
  _sasm = sasm;
  _return_state = return_state;
  __ prologue(name, must_gc_arguments);
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  __ load_parameter(offset_in_words, reg);
}


StubFrame::~StubFrame() {
  if (_return_state == requires_return) {
    __ epilogue();
  } else {
    __ should_not_reach_here();
  }
  _sasm = nullptr;
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime call
// while these values are on the stack. These positions neglect those arguments,
// but the code in save_live_registers will take the argument count into
// account.
//

enum reg_save_layout {
  reg_save_frame_size = 32 /* float */ + 30 /* integer excluding x3, x4 */
};
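
// A sketch of the resulting save frame, lowest address first, assuming
// save_live_registers() below runs after MacroAssembler::enter() has already
// pushed fp and ra; the offsets match initialize_pd():
//
//   sp +  0*wordSize .. sp + 31*wordSize : f0 .. f31
//   sp + 32*wordSize                     : alignment padding from push_reg
//   sp + 33*wordSize .. sp + 59*wordSize : x5 .. x31
//   sp + 60*wordSize                     : saved fp
//   sp + 61*wordSize                     : saved ra
//
// 32 float + 27 integer + 1 pad + fp + ra = 62 words = reg_save_frame_size.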

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FPU registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them.  The
// deopt blob is the only thing which needs to describe FPU registers.
// In all other cases it should be sufficient to simply save their
// current value.

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
  assert_cond(oop_map != nullptr);

  // caller save registers only, see FrameMap::initialize
  // in c1_FrameMap_riscv.cpp for detail.
  const static Register caller_save_cpu_regs[FrameMap::max_nof_caller_save_cpu_regs] = {
    x7, x10, x11, x12, x13, x14, x15, x16, x17, x28, x29, x30, x31
  };

  for (int i = 0; i < FrameMap::max_nof_caller_save_cpu_regs; i++) {
    Register r = caller_save_cpu_regs[i];
    int sp_offset = cpu_reg_save_offsets[r->encoding()];
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                              r->as_VMReg());
  }

  // fpu_regs
  if (save_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  // x5 ~ x31 is 27 registers (an odd count), so push_reg reserves one extra
  // slot to keep sp 16-byte aligned
  __ push_reg(RegSet::range(x5, x31), sp);    // integer registers except ra(x1) & sp(x2) & gp(x3) & tp(x4)

  if (save_fpu_registers) {
    // float registers
    __ addi(sp, sp, -(FrameMap::nof_fpu_regs * wordSize));
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      __ fsd(as_FloatRegister(i), Address(sp, i * wordSize));
    }
  } else {
    // we define reg_save_layout = 62 as the fixed frame size,
    // we should also subtract 32 * wordSize from sp when save_fpu_registers == false
    __ addi(sp, sp, -32 * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      __ fld(as_FloatRegister(i), Address(sp, i * wordSize));
    }
    __ addi(sp, sp, FrameMap::nof_fpu_regs * wordSize);
  } else {
    // we define reg_save_layout = 62 as the fixed frame size,
    // we should also add 32 * wordSize to sp when restore_fpu_registers == false
    __ addi(sp, sp, 32 * wordSize);
  }

  // if the number of popped regs is odd, the reserved slot for alignment will be removed
  __ pop_reg(RegSet::range(x5, x31), sp);   // integer registers except ra(x1) & sp(x2) & gp(x3) & tp(x4)
}

static void restore_live_registers_except_r10(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      __ fld(as_FloatRegister(i), Address(sp, i * wordSize));
    }
    __ addi(sp, sp, FrameMap::nof_fpu_regs * wordSize);
  } else {
    // we define reg_save_layout = 62 as the fixed frame size,
    // we should also add 32 * wordSize to sp when restore_fpu_registers == false
    __ addi(sp, sp, 32 * wordSize);
  }

  // pop integer registers except ra(x1) & sp(x2) & gp(x3) & tp(x4) & x10
  // there is one reserved slot for alignment on the stack in save_live_registers().
  __ pop_reg(RegSet::range(x5, x9), sp);   // pop x5 ~ x9 with the reserved slot for alignment
  __ pop_reg(RegSet::range(x11, x31), sp); // pop x11 ~ x31; x10 will be automatically skipped here
}

void Runtime1::initialize_pd() {
  int i = 0;
  int sp_offset = 0;
  const int step = 2; // SP offsets are in halfwords
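  // (a VMReg stack slot is sizeof(jint) = 4 bytes -- see generate_oop_map --
  //  so each 8-byte register covers two consecutive slots)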

  // all float registers are saved explicitly
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += step;
  }

  // a slot reserved for stack 16-byte alignment, see MacroAssembler::push_reg
  sp_offset += step;
  // we save x5 ~ x31, except x0 ~ x4: loop starts from x5
  for (i = 5; i < FrameMap::nof_cpu_regs; i++) {
    cpu_reg_save_offsets[i] = sp_offset;
    sp_offset += step;
  }
}

// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs arguments (passed in t0 and t1)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  assert_cond(oop_map != nullptr);
  int call_offset = 0;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    __ mv(c_rarg1, t0);
    __ mv(c_rarg2, t1);
    call_offset = __ call_RT(noreg, noreg, target);
  }
  OopMapSet* oop_maps = new OopMapSet();
  assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, oop_map);

  return oop_maps;
}

OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = x10;
  const Register exception_pc  = x13;

  OopMapSet* oop_maps = new OopMapSet();
  assert_cond(oop_maps != nullptr);
  OopMap* oop_map = nullptr;

  switch (id) {
    case forward_exception_id:
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places.  Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found.  Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm, 1 /* thread */);

      // load and clear pending exception oop into x10
      __ ld(exception_oop, Address(xthread, Thread::pending_exception_offset()));
      __ sd(zr, Address(xthread, Thread::pending_exception_offset()));

      // load issuing PC (the return address for this stub) into x13
      __ ld(exception_pc, Address(fp, frame::return_addr_offset * BytesPerWord));

      // make sure that the vm_results are cleared (may be unnecessary)
      __ sd(zr, Address(xthread, JavaThread::vm_result_offset()));
      __ sd(zr, Address(xthread, JavaThread::vm_result_2_offset()));
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
      break;
    case handle_exception_from_callee_id: {
      // At this point all registers except exception oop (x10) and
      // exception pc (ra) are dead.
      const int frame_size = 2 /* fp, return address */;
      oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
      sasm->set_frame_size(frame_size);
      break;
    }
    default: ShouldNotReachHere();
  }

  // verify that only x10 and x13 are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that x10 contains a valid exception
  __ verify_not_null_oop(exception_oop);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ld(t0, Address(xthread, JavaThread::exception_oop_offset()));
  __ beqz(t0, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld(t0, Address(xthread, JavaThread::exception_pc_offset()));
  __ beqz(t0, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ sd(exception_oop, Address(xthread, JavaThread::exception_oop_offset()));
  __ sd(exception_pc, Address(xthread, JavaThread::exception_pc_offset()));

  // patch throwing pc into return address (has bci & oop map)
  __ sd(exception_pc, Address(fp, frame::return_addr_offset * BytesPerWord));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  guarantee(oop_map != nullptr, "null oop_map!");
  oop_maps->add_gc_map(call_offset, oop_map);

  // x10: handler address
  //      will be the deopt blob if the nmethod was deoptimized while we looked
  //      up the handler, regardless of whether a handler existed in the nmethod.

  // only x10 is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ sd(x10, Address(fp, frame::return_addr_offset * BytesPerWord));

  switch (id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // Restore the registers that were saved at the beginning.
      restore_live_registers(sasm, id != handle_exception_nofpu_id);
      break;
    case handle_exception_from_callee_id:
      break;
    default: ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = x10;
  // other registers used in this stub
  const Register handler_addr = x11;

  if (AbortVMOnException) {
    __ enter();
    save_live_registers(sasm);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), x10);
    restore_live_registers(sasm);
    __ leave();
  }

  // verify that only x10 is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ld(t0, Address(xthread, JavaThread::exception_oop_offset()));
  __ beqz(t0, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld(t0, Address(xthread, JavaThread::exception_pc_offset()));
  __ beqz(t0, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Save our return address because
  // exception_handler_for_return_address will destroy it.  We also
  // save exception_oop
  __ addi(sp, sp, -2 * wordSize);
  __ sd(exception_oop, Address(sp, wordSize));
  __ sd(ra, Address(sp));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), xthread, ra);
  // x10: exception handler address of the caller

  // Only x10 is valid at this time; all other registers have been
  // destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ mv(handler_addr, x10);

  // get throwing pc (= return address).
  // ra has been destroyed by the call
  __ ld(ra, Address(sp));
  __ ld(exception_oop, Address(sp, wordSize));
  __ addi(sp, sp, 2 * wordSize);
  __ mv(x13, ra);

  __ verify_not_null_oop(exception_oop);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // x10: exception oop
  // x13: throwing pc
  // x11: exception handler
  __ jr(handler_addr);
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);
  assert_cond(oop_map != nullptr);

  __ mv(c_rarg0, xthread);
  Label retaddr;
  __ set_last_Java_frame(sp, fp, retaddr, t0);
  // do the call
  __ rt_call(target);
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
  assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  { Label L;
    __ get_thread(t0);
    __ beq(xthread, t0, L);
    __ stop("StubAssembler::call_RT: xthread not callee saved?");
    __ bind(L);
  }
#endif
  __ reset_last_Java_frame(true);

#ifdef ASSERT
  // Check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
  __ beqz(t0, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld(t0, Address(xthread, JavaThread::exception_pc_offset()));
  __ beqz(t0, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Runtime will return true if the nmethod has been deoptimized, this is the
  // expected scenario and anything else is an error. Note that we maintain a
  // check on the result purely as a defensive measure.
  Label no_deopt;
  __ beqz(x10, no_deopt);                                // Have we deoptimized?

  // Perform a re-execute. The proper return address is already on the stack,
  // we just need to restore registers, pop all of our frames but the return
  // address and jump to the deopt blob.

  restore_live_registers(sasm);
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(no_deopt);
  __ stop("deopt not performed");

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  // for better readability
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = nullptr;
  switch (id) {
    {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret();
      }
      break;

    case throw_div0_exception_id:
      {
        StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = x13; // Incoming
        Register obj   = x10; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        assert_cond(map != nullptr);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r10(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret();

        // x10: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = x10;
        Register method = x11;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        assert_cond(map != nullptr);

        const int bci_off = 0;
        const int method_off = 1;
        // Retrieve bci
        __ lw(bci, Address(fp, bci_off * BytesPerWord));
        // And a pointer to the Method*
        __ ld(method, Address(fp, method_off * BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret();
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length   = x9;  // Incoming
        Register klass    = x13; // Incoming
        Register obj      = x10; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register tmp = obj;
          __ lwu(tmp, Address(klass, Klass::layout_helper_offset()));
          __ sraiw(tmp, tmp, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value);
          __ mv(t0, tag);
          __ beq(t0, tmp, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        __ enter();
        OopMap* map = save_live_registers(sasm);
        assert_cond(map != nullptr);
        int call_offset = 0;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r10(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret();

        // x10: new array
      }
      break;

    case new_multi_array_id:
      {
        StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // x10: klass
        // x9: rank
        // x12: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        assert_cond(map != nullptr);
        __ mv(c_rarg1, x10);
        __ mv(c_rarg3, x12);
        __ mv(c_rarg2, x9);
        int call_offset = __ call_RT(x10, noreg, CAST_FROM_FN_PTR(address, new_multi_array), x11, x12, x13);

        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r10(sasm);

        // x10: new multi array
        __ verify_oop(x10);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations
        __ verify_oop(c_rarg0);

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = x15;
        __ load_klass(t, x10);
        __ lwu(t, Address(t, Klass::access_flags_offset()));
        __ test_bit(t0, t, exact_log2(JVM_ACC_HAS_FINALIZER));
        __ bnez(t0, register_finalizer);
        __ ret();

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        assert_cond(oop_map != nullptr);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), x10);
        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret();
      }
      break;

    case throw_class_cast_exception_id:
      {
        StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm,
                                            CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // push klass_RInfo (object klass or other subclass)
        // push sup_k_RInfo (array element klass or other superclass)
        // jump to slow_subtype_check
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          x10_off, x10_off_hi,
          x12_off, x12_off_hi,
          x14_off, x14_off_hi,
          x15_off, x15_off_hi,
          sup_k_off, sup_k_off_hi,
          klass_off, klass_off_hi,
          framesize,
          result_off = sup_k_off
        };
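        // Each *_off / *_off_hi pair names the two 32-bit VMReg stack slots
        // of one 64-bit word, which is why the loads and stores below scale
        // these enum values by VMRegImpl::stack_slot_size rather than wordSize.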

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push_reg(RegSet::of(x10, x12, x14, x15), sp);

        __ ld(x14, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // sub klass
        __ ld(x10, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // super klass

        Label miss;
        __ check_klass_subtype_slow_path(x14, x10, x12, x15, nullptr, &miss);

        // fallthrough on success:
        __ mv(t0, 1);
        __ sd(t0, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop_reg(RegSet::of(x10, x12, x14, x15), sp);
        __ ret();

        __ bind(miss);
        __ sd(zr, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop_reg(RegSet::of(x10, x12, x14, x15), sp);
        __ ret();
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        assert_cond(map != nullptr);

        // Called with store_parameter and not C abi
        f.load_argument(1, x10); // x10: object
        f.load_argument(0, x11); // x11: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), x10, x11);

        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        assert_cond(map != nullptr);

        // Called with store_parameter and not C abi
        f.load_argument(0, x10); // x10: lock address

        // note: really a leaf routine but must set up last Java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last Java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), x10);

        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);
        OopMap* oop_map = save_live_registers(sasm);
        assert_cond(oop_map != nullptr);
        f.load_argument(0, c_rarg1);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1);

        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");
        __ leave();
        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case throw_range_check_failed_id:
      {
        StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case access_field_patching_id:
      {
        StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case throw_index_exception_id:
      {
        StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_array_store_exception_id:
      {
        StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);

        OopMap* map = save_live_registers(sasm);
        assert_cond(map != nullptr);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        assert_cond(oop_maps != nullptr);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != nullptr, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case dtrace_object_alloc_id:
      { // c_rarg0: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        save_live_registers(sasm);

        __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), c_rarg0);

        restore_live_registers(sasm);
      }
      break;

    default:
      {
        StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
        __ mv(x10, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), x10);
        __ should_not_reach_here();
      }
      break;
    }
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return nullptr; }