/*
 * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_NATIVEINST_ARM_32_HPP
#define CPU_ARM_NATIVEINST_ARM_32_HPP

#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "runtime/icache.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "register_arm.hpp"

// -------------------------------------------------------------------

// Some experimental projects extend the ARM back-end by implementing
// as a sequence of instructions what the front-end usually assumes
// is a single native instruction.
//
// The 'Raw' variants are the low-level initial code (usually one
// instruction wide, though some are already composed sequences).
// They should be used only by the back-end.
//
// The non-raw classes are the front-end entry points, hiding potential
// back-end extensions and the actual instruction size.
class NativeInstruction;
class NativeCall;

class RawNativeInstruction {
 public:

  enum ARM_specific {
    instruction_size = Assembler::InstructionSize
  };

  enum InstructionKind {
    instr_ldr_str    = 0x50,
    instr_ldrh_strh  = 0x10,
    instr_fld_fst    = 0xd0
  };

  // illegal instruction used by NativeJump::patch_verified_entry
  // permanently undefined (UDF): 0xe << 28 | 0b1111111 << 20 | 0b1111 << 4
  static const int zombie_illegal_instruction = 0xe7f000f0;

  // Decodes an ARM "modified immediate" operand: an 8-bit value
  // rotated right by twice the 4-bit rotation field in bits 8..11.
  static int decode_rotated_imm12(int encoding) {
    int base = encoding & 0xff;
    int right_rotation = (encoding & 0xf00) >> 7;
    int left_rotation = 32 - right_rotation;
    int val = (base >> right_rotation) | (base << left_rotation);
    return val;
  }
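
  // Illustrative example (not from the original source): for the
  // operand encoding 0x2ff, base is 0xff and the rotation field is 2,
  // so the value is 0xff rotated right by 4 bits:
  //   decode_rotated_imm12(0x2ff) == 0xf000000f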

  address addr_at(int offset)        const { return (address)this + offset; }
  address instruction_address()      const { return addr_at(0); }
  address next_raw_instruction_address() const { return addr_at(instruction_size); }

  static RawNativeInstruction* at(address address) {
    return (RawNativeInstruction*)address;
  }
  RawNativeInstruction* next_raw() const {
    return at(next_raw_instruction_address());
  }

 public:
  int encoding()                     const { return *(int*)this; }

  void set_encoding(int value) {
    int old = *(int*)this;
    if (old != value) {
      *(int*)this = value;
      ICache::invalidate_word((address)this);
    }
  }

  // Classifies memory-access instructions by bits 27..20 of the
  // encoding; the 0xf2 mask ignores the bits that distinguish e.g.
  // load from store (L) and positive from negative offsets (U).
  InstructionKind kind() const {
    return (InstructionKind) ((encoding() >> 20) & 0xf2);
  }
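
  // Illustrative example (not from the original source):
  //   0xe5901000 is "ldr r1, [r0]"; (0xe5901000 >> 20) & 0xf2 == 0x50,
  //   i.e. instr_ldr_str, and the same kind is returned for the
  //   corresponding str, byte, and negative-offset forms.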

  bool is_nop()            const { return encoding() == (int)0xe1a00000; }
  bool is_b()              const { return (encoding() & 0x0f000000) == 0x0a000000; }
  bool is_bx()             const { return (encoding() & 0x0ffffff0) == 0x012fff10; }
  bool is_bl()             const { return (encoding() & 0x0f000000) == 0x0b000000; }
  bool is_blx()            const { return (encoding() & 0x0ffffff0) == 0x012fff30; }
  bool is_fat_call()       const {
    return (is_add_lr() && next_raw()->is_jump());
  }
  bool is_ldr_call()       const {
    return (is_add_lr() && next_raw()->is_ldr_pc());
  }
  bool is_jump()           const { return is_b() || is_ldr_pc(); }
  bool is_call()           const { return is_bl() || is_fat_call(); }
  bool is_branch()         const { return is_b() || is_bl(); }
  bool is_far_branch()     const { return is_movw() || is_ldr_literal(); }
  bool is_ldr_literal()    const {
    // ldr Rx, [PC, #offset] for positive or negative offsets
    return (encoding() & 0x0f7f0000) == 0x051f0000;
  }
  bool is_ldr()    const {
    // ldr Rd, [Rn, #offset] for positive or negative offsets
    return (encoding() & 0x0f700000) == 0x05100000;
  }
  int ldr_offset() const {
    assert(is_ldr(), "must be");
    int offset = encoding() & 0xfff;
    if ((encoding() & (1 << 23)) == 0) {
      // U bit clear: negative offset
      offset = -offset;
    }
    return offset;
  }
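
  // Illustrative example (not from the original source):
  //   0xe51f2008 is "ldr r2, [pc, #-8]": the immediate field is 8 and
  //   the U bit (bit 23) is clear, so ldr_offset() returns -8.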
  // is_ldr_pc: ldr PC, [PC, #offset]
  bool is_ldr_pc()         const { return (encoding() & 0x0f7ff000) == 0x051ff000; }
  // is_setting_pc(): ldr PC, [Rxx, #offset]
  bool is_setting_pc()         const { return (encoding() & 0x0f70f000) == 0x0510f000; }
  bool is_add_lr()         const { return (encoding() & 0x0ffff000) == 0x028fe000; }
  bool is_add_pc()         const { return (encoding() & 0x0fff0000) == 0x028f0000; }
  bool is_sub_pc()         const { return (encoding() & 0x0fff0000) == 0x024f0000; }
  bool is_pc_rel()         const { return is_add_pc() || is_sub_pc(); }
  bool is_movw()           const { return (encoding() & 0x0ff00000) == 0x03000000; }
  bool is_movt()           const { return (encoding() & 0x0ff00000) == 0x03400000; }
  // c2 doesn't use fixed registers for the safepoint poll address,
  // so the mask below ignores the base register Rn
  bool is_safepoint_poll() const { return (encoding() & 0xfff0ffff) == 0xe590c000; }
};

inline RawNativeInstruction* rawNativeInstruction_at(address address) {
  return (RawNativeInstruction*)address;
}

// Base class exported to the front-end
class NativeInstruction: public RawNativeInstruction {
public:
  static NativeInstruction* at(address address) {
    return (NativeInstruction*)address;
  }

public:
  // No need to consider indirections while parsing NativeInstruction
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  // next() is no longer defined, to avoid confusion.
  //
  // The front end and most classes, except for those defined in nativeInst_arm
  // or relocInfo_arm, should only use next_instruction_address(), skipping
  // over composed instructions and ignoring back-end extensions.
  //
  // The back-end can use next_raw() when it knows the instruction sequence
  // and only wants to skip a single native instruction.
};

inline NativeInstruction* nativeInstruction_at(address address) {
  return (NativeInstruction*)address;
}

// -------------------------------------------------------------------
// Raw b() or bl() instructions, not used by the front-end.
class RawNativeBranch: public RawNativeInstruction {
 public:

  address destination(int adj = 0) const {
    return instruction_address() + (encoding() << 8 >> 6) + 8 + adj;
  }
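
  // The shift pair above sign-extends the 24-bit branch offset field
  // and multiplies it by 4; the +8 accounts for the PC reading as the
  // current instruction plus 8. Illustrative example (not from the
  // original source): for a bl encoded as 0xeb000001,
  // (0xeb000001 << 8 >> 6) == 4, so the destination is this
  // instruction's address + 12.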

  void set_destination(address dest) {
    int new_offset = (int)(dest - instruction_address() - 8);
    assert(new_offset < 0x2000000 && new_offset > -0x2000000, "encoding constraint");
    set_encoding((encoding() & 0xff000000) | ((unsigned int)new_offset << 6 >> 8));
  }
};

inline RawNativeBranch* rawNativeBranch_at(address address) {
  assert(rawNativeInstruction_at(address)->is_branch(), "must be");
  return (RawNativeBranch*)address;
}

class NativeBranch: public RawNativeBranch {
};

inline NativeBranch* nativeBranch_at(address address) {
  return (NativeBranch *) rawNativeBranch_at(address);
}

// -------------------------------------------------------------------
// NativeGeneralJump is for patchable internal (near) jumps.
// It is used directly by the front-end and must be a single instruction wide
// (to support patching to other kinds of instructions).
class NativeGeneralJump: public RawNativeInstruction {
 public:

  address jump_destination() const {
    return rawNativeBranch_at(instruction_address())->destination();
  }

  void set_jump_destination(address dest) {
    return rawNativeBranch_at(instruction_address())->set_destination(dest);
  }

  static void insert_unconditional(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer) {
    assert(((int)instr_addr & 3) == 0 && ((int)code_buffer & 3) == 0, "must be aligned");
    // Writing a word is atomic on ARM, so no MT-safe tricks are needed
    rawNativeInstruction_at(instr_addr)->set_encoding(*(int*)code_buffer);
  }
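
  // Sketch of intended use (hypothetical variable names): assemble the
  // replacement instruction into a word-aligned buffer, then install it
  // over the live jump in one word-sized store:
  //   NativeGeneralJump::replace_mt_safe(jump_addr, stub_buffer);
  // Because an aligned 32-bit store is single-copy atomic on ARM,
  // concurrent threads observe either the old or the new instruction,
  // never a mix of the two.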
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (NativeGeneralJump*)address;
}

// -------------------------------------------------------------------
class RawNativeJump: public NativeInstruction {
 public:

  address jump_destination(int adj = 0) const {
    address a;
    if (is_b()) {
      a = rawNativeBranch_at(instruction_address())->destination(adj);
      // Jump destination -1 is encoded as a jump to self
      if (a == instruction_address()) {
        return (address)-1;
      }
    } else {
      assert(is_ldr_pc(), "must be");
      int offset = this->ldr_offset();
      a = *(address*)(instruction_address() + 8 + offset);
    }
    return a;
  }

  void set_jump_destination(address dest) {
    if (is_b()) {
      // Jump destination -1 is encoded as a jump to self
      if (dest == (address)-1) {
        dest = instruction_address();
      }
      rawNativeBranch_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_ldr_pc(), "must be");
      int offset = this->ldr_offset();
      *(address*)(instruction_address() + 8 + offset) = dest;
      OrderAccess::storeload(); // overkill if caller holds lock?
    }
  }

  static void check_verified_entry_alignment(address entry, address verified_entry);

  static void patch_verified_entry(address entry, address verified_entry, address dest);

};

inline RawNativeJump* rawNativeJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (RawNativeJump*)address;
}

// -------------------------------------------------------------------
class RawNativeCall: public NativeInstruction {
  // See IC calls in LIR_Assembler::ic_call(): ARM v5/v6 doesn't use a
  // single bl for IC calls.

 public:

  address return_address() const {
    if (is_bl()) {
      return addr_at(instruction_size);
    } else {
      assert(is_fat_call(), "must be");
      int offset = encoding() & 0xff;
      return addr_at(offset + 8);
    }
  }
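
  // In the fat-call case above, the first instruction is
  // "add lr, pc, #offset" (see is_add_lr()), so LR is set to this
  // address + offset + 8: the PC reads as the current instruction plus
  // 8, and encoding() & 0xff extracts the (unrotated) immediate.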

  address destination(int adj = 0) const {
    if (is_bl()) {
      return rawNativeBranch_at(instruction_address())->destination(adj);
    } else {
      assert(is_add_lr(), "must be"); // fat_call
      RawNativeJump *next = rawNativeJump_at(next_raw_instruction_address());
      return next->jump_destination(adj);
    }
  }

  void set_destination(address dest) {
    if (is_bl()) {
      return rawNativeBranch_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_add_lr(), "must be"); // fat_call
      RawNativeJump *next = rawNativeJump_at(next_raw_instruction_address());
      return next->set_jump_destination(dest);
    }
  }

  void set_destination_mt_safe(address dest) {
    assert(CodeCache::contains(dest), "external destination might be too far");
    set_destination(dest);
  }

  void verify() {
    assert(RawNativeInstruction::is_call() || (!VM_Version::supports_movw() && RawNativeInstruction::is_jump()), "must be");
  }

  void verify_alignment() {
    // Nothing to do on ARM
  }

  static bool is_call_before(address return_address);
};

inline RawNativeCall* rawNativeCall_at(address address) {
  assert(rawNativeInstruction_at(address)->is_call(), "must be");
  return (RawNativeCall*)address;
}

NativeCall* rawNativeCall_before(address return_address);

// -------------------------------------------------------------------
// NativeMovRegMem need not be extended with indirection support.
// (Field access patching is handled differently in that case.)
class NativeMovRegMem: public NativeInstruction {
 public:
  enum arm_specific_constants {
    instruction_size = 8
  };

  int num_bytes_to_end_of_patch() const { return instruction_size; }

  int offset() const;
  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) {
    set_offset(offset() + add_offset);
  }

};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* instr = (NativeMovRegMem*)address;
  assert(instr->kind() == NativeInstruction::instr_ldr_str   ||
         instr->kind() == NativeInstruction::instr_ldrh_strh ||
         instr->kind() == NativeInstruction::instr_fld_fst, "must be");
  return instr;
}

// -------------------------------------------------------------------
// NativeMovConstReg is primarily for loading oops and metadata
class NativeMovConstReg: public NativeInstruction {
 public:

  intptr_t data() const;
  void set_data(intptr_t x, address pc = 0);
  bool is_pc_relative() {
    return !is_movw();
  }
  void set_pc_relative_offset(address addr, address pc);
  address next_instruction_address() const {
    // NOTE: CompiledStaticCall::set_to_interpreted() calls this, but such
    // callers are restricted to a single-instruction ldr, so there is no
    // need to jump over several instructions.
    assert(is_ldr_literal(), "should only be used for single-instruction loads");
    return next_raw_instruction_address();
  }
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeInstruction* ni = nativeInstruction_at(address);
  assert(ni->is_ldr_literal() || ni->is_pc_rel() ||
         (ni->is_movw() && VM_Version::supports_movw()), "must be");
  return (NativeMovConstReg*)address;
}

// -------------------------------------------------------------------
// Front end classes, hiding experimental back-end extensions.

// Extension to support indirections
class NativeJump: public RawNativeJump {
 public:
};

inline NativeJump* nativeJump_at(address address) {
  assert(nativeInstruction_at(address)->is_jump(), "must be");
  return (NativeJump*)address;
}

class NativeCall: public RawNativeCall {
public:
  // NativeCall::next_instruction_address() is used only to define the
  // range in which to look for relocation information. We need not
  // walk over composed instructions, as long as the relocation information
  // is associated with the first instruction.
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

};

inline NativeCall* nativeCall_at(address address) {
  assert(nativeInstruction_at(address)->is_call() ||
         (!VM_Version::supports_movw() && nativeInstruction_at(address)->is_jump()), "must be");
  return (NativeCall*)address;
}

inline NativeCall* nativeCall_before(address return_address) {
  return (NativeCall *) rawNativeCall_before(return_address);
}

#endif // CPU_ARM_NATIVEINST_ARM_32_HPP