/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_riscv.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_riscv.inline.hpp"


#define __ ce->masm()->

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset());
  __ code_section()->relocate(__ pc(), safepoint_pc.rspec());
  __ la(t0, safepoint_pc.target());
  __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));

  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  __ far_jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(t0, m);
  ce->store_parameter(t0, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ j(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mv(t0, _index->as_register());
  } else {
    __ mv(t0, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    assert(_array != NULL, "sanity");
    __ mv(t1, _array->as_pointer_register());
    stub_id = Runtime1::throw_range_check_failed_id;
  }
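  // The stub entry can lie outside the +/-1 MiB range of a plain JAL, so the
  // call below is emitted as a patchable AUIPC/JALR pair. Schematically (a
  // sketch; la_patchable splits the pc-relative offset at emission time):
  //   auipc ra, %pcrel_hi(stub_entry)
  //   jalr  ra, %pcrel_lo(stub_entry)(ra)   // return address lands in ra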
  int32_t off = 0;
  __ la_patchable(ra, RuntimeAddress(Runtime1::entry_for(stub_id)), off);
  __ jalr(ra, ra, off);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}

// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mv(x13, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}
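
// NewInstanceStub above and the two array allocation stubs below share one
// Runtime1 calling convention: the klass is passed in x13, the array length
// (where present) in x9, and the new oop comes back in x10. The asserts
// merely re-check that the LIR generator honored these fixed registers.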

// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == x9, "length must be in x9");
  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == x9, "length must be in x9");
  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ j(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ la(ra, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}
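
// Note the tail-call idiom above: ra is pointed at _continuation before the
// far_jump, so when the monitorexit runtime stub returns it lands directly
// back in compiled code and no branch is needed here.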

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "RISCV should not use C1 runtime patching");
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a = NULL;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a tmp register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mv(t0, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, t1);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // ---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  const int args_num = 5;
  VMRegPair args[args_num];
  BasicType signature[args_num] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, args_num);

  // push parameters
  Register r[args_num];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments assigned stack slots get stored; register arguments are
  // already in place
  for (int j = 0; j < args_num; j++) {
    VMReg r_1 = args[j].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ sd(r[j], Address(sp, st_off));
    } else {
      assert(r[j] == args[j].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == NULL) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ la(t1, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
    __ add_memory_int32(Address(t1), 1);
  }
#endif

  __ j(_continuation);
}

#undef __