/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

#ifndef _LP64
float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

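  // cvttss2si/cvttsd2si (and the x87 fist path) produce the "integer
  // indefinite" value 0x80000000 when the input is NaN or does not fit in
  // an int, so only three cases need fixing up here: NaN -> 0, positive
  // overflow -> maxInt, negative overflow -> minInt (which the result
  // register already holds).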
  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
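  // An unordered compare (i.e. a NaN input) sets the parity flag; 'below'
  // means the input was negative, so minInt is already the correct result.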
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}
#endif // !_LP64

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
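  // ce->masm()->pc() - ce->masm()->offset() is the start of the code
  // buffer, so adding safepoint_offset() yields the absolute pc of the poll
  // instruction; the shared polling-page return handler expects this pc in
  // saved_exception_pc.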
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
#ifdef _LP64
  __ lea(rscratch1, safepoint_pc);
  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
#else
  const Register tmp1 = rcx;
  const Register tmp2 = rdx;
  __ push(tmp1);
  __ push(tmp2);

  __ lea(tmp1, safepoint_pc);
  __ get_thread(tmp2);
  __ movptr(Address(tmp2, JavaThread::saved_exception_pc_offset()), tmp1);

  __ pop(tmp2);
  __ pop(tmp1);
#endif /* _LP64 */
  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
         "polling page return stub not created yet");

  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  __ jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
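  // store_parameter passes arguments through the outgoing stack slots that
  // the Runtime1 stub reads: slot 0 holds the bci, slot 1 the Method* being
  // profiled.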
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  assert(UseCompactObjectHeaders, "only with compact headers");
  __ bind(_entry);
#ifdef _LP64
  Register d = _result->as_register();
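  // With compact headers the klass is encoded in the object's mark word.
  // The fast path branches here when the object is monitor-locked and the
  // loaded mark is a tagged ObjectMonitor*; reload the displaced mark word
  // from the monitor's header field (the offset accounts for the tag) so
  // the continuation can extract the klass bits as usual.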
  __ movq(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
  __ jmp(_continuation);
#else
  __ should_not_reach_here();
#endif
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a jump to the stub
// At runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing the class, restore the original code, re-execute the instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
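// The patch-info record ends exactly NativeGeneralJump::instruction_size
// bytes, i.e. one call instruction, before the return address of the call
// into the patching stub, which is how the runtime locates the record from
// the caller pc (see the assert after the call in emit_code below).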

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to make
  // sure that no other processor sees a partially patched instruction. It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may prefetch aggressively it is
  // very hard to guess what code might be in the icache. Force the
  // instruction to be double-word aligned so that it doesn't span a cache
  // line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

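  // 0xB8 is the opcode of "movl eax, imm32"; the four immediate bytes carry
  // the patch data: a zero filler, the distance back to the start of the
  // patch template, the number of bytes to skip past this record, and the
  // number of bytes to copy back when patching.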
  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
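  // The instruction at _pc_start was emitted with oop/metadata relocation
  // info, but the site now starts with a jump to this stub and holds no
  // valid embedded pointer yet, so downgrade its relocation to 'none'; the
  // runtime reinstates the proper relocation when it patches the site.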
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5);
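  // java_calling_convention assigns each of the five arguments either a
  // register or a stack slot according to the standard Java calling
  // convention; register arguments are already in place, so only the
  // stack-resident ones are stored below.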

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
  }

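  // The call site must be naturally aligned so that its displacement can be
  // patched atomically when the static call is resolved; the static call
  // stub emitted next is the stand-in target used during resolution and for
  // calls into the interpreter.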
  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

#undef __