/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

#ifndef _LP64
float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
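  // For illustration: cvttss2si/cvttsd2si leave the x86 "integer indefinite"
  // value 0x80000000 (min_jint) in the result on overflow or NaN, and
  //   0x80000000 - 1 == 0x7fffffff (max_jint),
  // which is the value f2i/d2i must produce for a positive out-of-range input.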
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}
#endif // !_LP64

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
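  // pc() - offset() is the start of the code buffer, so adding
  // safepoint_offset() yields the absolute address of the safepoint poll.
  // That pc is saved in the thread so the polling page return handler can
  // identify the poll that trapped.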
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
#ifdef _LP64
  __ lea(rscratch1, safepoint_pc);
  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
#else
  const Register tmp1 = rcx;
  const Register tmp2 = rdx;
  __ push(tmp1);
  __ push(tmp2);

  __ lea(tmp1, safepoint_pc);
  __ get_thread(tmp2);
  __ movptr(Address(tmp2, JavaThread::saved_exception_pc_offset()), tmp1);

  __ pop(tmp2);
  __ pop(tmp1);
#endif /* _LP64 */
  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
         "polling page return stub not created yet");

  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  __ jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
#ifdef _LP64
  Register res = _result->as_register();
  ce->store_parameter(_obj->as_register(), 0);
  if (res != rax) {
    // This preserves rax and allows it to be used as the return register,
    // without messing with the stack.
    __ xchgptr(rax, res);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_klass_id)));
  if (res != rax) {
    // Swap rax back and move the result to the correct register.
    __ xchgptr(rax, res);
  }
  __ jmp(_continuation);
#else
  __ should_not_reach_here();
#endif
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
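//
// As an illustrative sketch (not the exact byte encoding), a load_mirror
// patch site evolves roughly like this:
//
//   main code, before patching:  jmp <stub entry>              (nops pad the site)
//   stub:                        copy of "movoop reg, NULL"    (patch template)
//                                being_initialized check       (load_mirror only)
//                                5-byte patch record
//                                call <Runtime1 patching stub>
//   main code, after patching:   movoop reg, <resolved oop>    (re-executed)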

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that no thread sees a partially patched instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may do aggressive prefetching it's
  // very hard to guess what code might be in the icache.  Force the
  // instruction to be double word aligned so that it doesn't span a
  // cache line.
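  // Concretely: NativeGeneralJump::instruction_size is 5 bytes on x86, so
  // align_up(5, wordSize) == 8 on both 32- and 64-bit, and the 5-byte jump
  // starts on an 8-byte boundary. It therefore fits entirely within one
  // 8-byte chunk and cannot straddle a cache line.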
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
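  // Byte by byte, the record emitted below is:
  //   0xB8                             opcode of "movl eax, imm32"
  //   0x00                             filler byte
  //   being_initialized_entry_offset   distance from the record's end back to
  //                                    the being_initialized entry
  //   bytes_to_skip                    bytes between end_of_patch and the
  //                                    record's end (checked by the assert below)
  //   _bytes_to_copy                   size of the copied instruction template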
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
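  // The jmp above may assemble to its short 2-byte form; padding out to
  // jmp_off + 5 guarantees the site is large enough for the 5-byte call
  // that deoptimization patches in.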
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments assigned to stack slots are stored now; register arguments
  // must already be in the right registers
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
  }
#endif

  __ jmp(_continuation);
}

#undef __