/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeCall::verify() {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeLoadGot::report_and_fail() const {
  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  fatal("not an adrp-based load from the GOT");
}

void NativeLoadGot::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

address NativeLoadGot::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

intptr_t NativeLoadGot::data() const {
  return *(intptr_t *) got_address();
}
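
// A sketch of the PLT entry shapes that the NativePltCall accessors below
// assume (inferred from the accessors themselves, not a normative layout):
//
//   virtual call entry:  NativeLoadGot  (load of extra data)  <- plt_load_got()
//                        NativeGotJump  (jump via the GOT)    <- plt_jump()
//
//   static call entry:   NativeGotJump  (jump via the GOT)    <- plt_jump()
//                        c2i stub:  NativeLoadGot + NativeGotJump
//
// The call instruction branches to plt_entry(); the accessors walk forward
// from there depending on which of the two shapes they find.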
address NativePltCall::destination() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  return *(address*)MacroAssembler::target_addr_for_insn((address)jump);
}

address NativePltCall::plt_entry() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativePltCall::plt_jump() const {
  address entry = plt_entry();
  // Virtual PLT code has the move (GOT load) instruction first.
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    return nativeLoadGot_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_load_got() const {
  address entry = plt_entry();
  if (!((NativeGotJump*)entry)->is_GotJump()) {
    // Virtual PLT code has the move instruction first.
    return entry;
  } else {
    // Static PLT code has the move instruction second (from the c2i stub).
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should only be called for static calls, which have a c2i stub.
  NativeLoadGot* load = nativeLoadGot_at(entry);
  return entry;
}

address NativePltCall::plt_resolve_call() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address entry = jump->next_instruction_address();
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    // Skip the c2i stub: a GOT load followed by a GOT jump.
    entry = nativeLoadGot_at(entry)->next_instruction_address();
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

void NativePltCall::reset_to_plt_resolve_call() {
  set_destination_mt_safe(plt_resolve_call());
}

void NativePltCall::set_destination_mt_safe(address dest) {
  // We are rewriting the value in the GOT; it should always be aligned.
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address* got = (address *) jump->got_address();
  *got = dest;
}

void NativePltCall::set_stub_to_clean() {
  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
  NativeGotJump* jump          = nativeGotJump_at(method_loader->next_instruction_address());
  method_loader->set_data(0);
  jump->set_jump_destination((address)-1);
}

void NativePltCall::verify() const {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

address NativeGotJump::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativeGotJump::destination() const {
  address *got_entry = (address *) got_address();
  return *got_entry;
}
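
// is_GotJump() peeks three instruction words (12 bytes) past the initial
// adrp and checks for "br x16" (encoding 0xd61f0200); it assumes a GOT jump
// ends by branching through x16 after loading the target out of the GOT,
// and anything else at this address is treated as a NativeLoadGot.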
bool NativeGotJump::is_GotJump() const {
  NativeInstruction *insn =
    nativeInstruction_at(addr_at(3 * NativeInstruction::instruction_size));
  return insn->encoding() == 0xd61f0200; // br x16
}

void NativeGotJump::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

address NativeCall::destination() const {
  address addr = (address)this;
  address destination = instruction_address() + displacement();

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Otherwise we get an assertion if the nmethod is a zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// The assert_lock parameter switches off the assertion during code
// generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
         CompiledICLocker::is_safe(addr_at(0)),
         "concurrent code patching");

  ResourceMark rm;
  int code_size = NativeInstruction::instruction_size;
  address addr_call = addr_at(0);
  bool reachable = Assembler::reachable_from_branch_at(addr_call, dest);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert (! is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }
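  // Note on ordering (a sketch of the reasoning, not a formal spec): the
  // trampoline's data word is updated before the bl below is touched, so a
  // thread already routed through the trampoline picks up the new target,
  // while a thread still taking the old direct branch keeps executing the
  // old destination until the single bl instruction is rewritten.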

  // Patch the call.
  if (reachable) {
    set_destination(dest);
  } else {
    assert (trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address bl_destination
    = MacroAssembler::pd_call_destination(call_addr);
  if (code->contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  if (code->is_nmethod()) {
    return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
  }

  return NULL;
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  if (! (nativeInstruction_at(instruction_address())->is_movz() ||
        is_adrp_at(instruction_address()) ||
        is_ldr_literal_at(instruction_address())) ) {
    fatal("should be MOVZ or ADRP or LDR (literal)");
  }
}


intptr_t NativeMovConstReg::data() const {
  // das(uint64_t(instruction_address()),2);
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in oops section.
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop* oop_addr = iter.oop_reloc()->oop_addr();
        *oop_addr = cast_to_oop(x);
        break;
      } else if (iter.type() == relocInfo::metadata_type) {
        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
        *metadata_addr = (Metadata*)x;
        break;
      }
    }
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::offset() const  {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(instruction_address());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (maybe_cpool_ref(pc)) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    *(int64_t*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}

//--------------------------------------------------------------------------------

void NativeJump::verify() { ; }


void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const          {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  // As a special case we also use the sequence movptr(r,0); br(r);
  // i.e. jump to 0 when we need to leave space for a wide immediate
  // load

  // return -1 if jump to self or to 0
  if ((dest == (address)this) || dest == 0) {
    dest = (address) -1;
  }
  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
};

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  // As a special case we also use jump to 0 when first generating
  // a general jump

  // return -1 if jump to self or to 0
  if ((dest == (address)this) || dest == 0) {
    dest = (address) -1;
  }
  return dest;
}

void NativeGeneralJump::set_jump_destination(address dest) {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1) {
    dest = instruction_address();
  }

  move->set_data((uintptr_t) dest);
};

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // a safepoint_poll is implemented in two steps as either
  //
  // adrp(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // mov(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // ldr(reg, [rthread, #offset]);
  // ldr(zr, [reg, #offset]);
  //
  // however, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that but C2
  // has to do the load and read as two independent instruction
  // generation steps. that's because with a single macro sequence the
  // generic C2 code can only add the oop map before the mov/adrp and
  // the trap handler expects an oop map to be associated with the
  // load. with the load scheduled as a prior step the oop map goes
  // where it is needed.
  //
  // so all we can do here is check that the marked instruction is a load
  // word to zr
  return is_ldrw_to_zr(address(this));
}
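
// ADR/ADRP share bits 28..24 == 10000; bit 31 distinguishes ADRP (1) from
// ADR (0).  The mask below ignores bits 30..29, which hold the low two bits
// of the immediate (immlo).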
bool NativeInstruction::is_adrp_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 24) & 0b10011111) == 0b10010000;
}

bool NativeInstruction::is_ldr_literal_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000;
}
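
// Bits 31..22 == 0b1011100101 is a 32-bit LDR (unsigned immediate offset),
// and Rt (bits 4..0) == 0b11111 names wzr/zr, so this matches the
// "ldr wzr, [reg, #offset]" form that the safepoint poll emits.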
bool NativeInstruction::is_ldrw_to_zr(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
          Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}
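
// A general jump is expected to look like the sequence that
// insert_unconditional() emits below: movptr(rscratch1, entry), which is
// assumed to expand to movz; movk; movk, followed by a register branch.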
bool NativeInstruction::is_general_jump() {
  if (is_movz()) {
    NativeInstruction* inst1 = nativeInstruction_at(addr_at(instruction_size * 1));
    if (inst1->is_movk()) {
      NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2));
      if (inst2->is_movk()) {
        NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3));
        if (inst3->is_blr()) {
          return true;
        }
      }
    }
  }
  return false;
}
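
// Bits 30..23 cover the two-bit opc field plus the fixed 100101 pattern of
// the move-wide-immediate group: opc == 10 is MOVZ, opc == 11 is MOVK.
// The sf bit (31) and the hw shift field are not examined, so both 32- and
// 64-bit forms match.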
bool NativeInstruction::is_movz() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
}

bool NativeInstruction::is_movk() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}

bool NativeInstruction::is_stop() {
  return uint_at(0) == 0xd4bbd5c1; // dcps1 #0xdeae
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
         || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
         "Aarch64 cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t disp = dest - verified_entry;
    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");
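
    // Assemble an unconditional B: opcode 000101 in bits 31..26 and the
    // signed word offset (disp >> 2) in imm26.  For example, if dest were
    // 8 bytes past verified_entry, disp >> 2 == 2 and the word would be
    // 0x14000002, i.e. "b .+8".  (Worked example for illustration only.)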
    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

void NativeGeneralJump::verify() {  }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.movptr(rscratch1, (uintptr_t)entry);
  a.br(rscratch1);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}
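
// A trampoline stub is assumed to be laid out roughly as
//
//   ldr  rscratch1, <data slot>
//   br   rscratch1
//   <data slot: 8-byte destination, at data_offset>
//
// so reading or patching the destination is a plain load or store of the
// 8-byte slot; the release fence in set_destination() is presumably there
// to order the data store before the later patching of the call branch.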
address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}

// Generate a trampoline for a branch to dest.  If there's no need for a
// trampoline, simply patch the call directly to dest.
address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
  MacroAssembler a(&cbuf);
  address stub = NULL;

  if (a.far_branches()
      && ! is_NativeCallTrampolineStub_at(instruction_address() + displacement())) {
    stub = a.emit_trampoline_stub(instruction_address() - cbuf.insts()->start(), dest);
  }

  if (stub == NULL) {
    // If we generated no stub, patch this call directly to dest.
    // This will happen if we don't need far branches or if there
    // already was a trampoline.
    set_destination(dest);
  }

  return stub;
}