< prev index next >

src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp

Print this page

195   if (maybe_cpool_ref(pc)) {
196     address addr = MacroAssembler::target_addr_for_insn(pc);
197     *(int64_t*)addr = x;
198   } else {
199     MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
200     ICache::invalidate_range(instruction_address(), instruction_size);
201   }
202 }
203 
204 void NativeMovRegMem::verify() {
205 #ifdef ASSERT
    // target_addr_for_insn() itself asserts that the instruction at this
    // address is one whose target it can decode, so simply calling it is
    // the verification; the returned address is deliberately discarded.
206   MacroAssembler::target_addr_for_insn(instruction_address());
207 #endif
208 }
209 
210 //--------------------------------------------------------------------------------
211 
212 void NativeJump::verify() { ; }
213 
214 

















215 address NativeJump::jump_destination() const          {
216   address dest = MacroAssembler::target_addr_for_insn(instruction_address());
217 
218   // We use a jump to self as the unresolved address which the inline
219   // cache code (and relocs) know about.
220   // As a special case we also use the sequence movptr(r,0); br(r);
221   // i.e. a jump to 0 when we need to leave space for a wide immediate
222   // load.
223 
224   // Return -1 (the "unresolved" sentinel) if this is a jump to self or to 0.
225   if ((dest == (address)this) || dest == nullptr) {
226     dest = (address) -1;
227   }
228   return dest;
229 }
230 
231 void NativeJump::set_jump_destination(address dest) {
232   // We use jump to self as the unresolved address which the inline
233   // cache code (and relocs) know about
234   if (dest == (address) -1)

195   if (maybe_cpool_ref(pc)) {
196     address addr = MacroAssembler::target_addr_for_insn(pc);
197     *(int64_t*)addr = x;
198   } else {
199     MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
200     ICache::invalidate_range(instruction_address(), instruction_size);
201   }
202 }
203 
204 void NativeMovRegMem::verify() {
205 #ifdef ASSERT
    // Decoding the target asserts internally that the instruction at this
    // address is a recognized form; the decoded address itself is unused.
206   MacroAssembler::target_addr_for_insn(instruction_address());
207 #endif
208 }
209 
210 //--------------------------------------------------------------------------------
211 
212 void NativeJump::verify() { ; }
213 
214 
215 void NativeJump::insert(address code_pos, address entry) {
216   // Displacement is relative to the jump instruction PC
217   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos);
218 
219   // The jump immediate is 26 bits and it will at execution time be scaled by 4
220   int64_t imm26 = disp >> 2;
221 
222   // The farthest that we can jump is +/- 128MiB
223   guarantee(Assembler::is_simm(imm26, 26), "maximum offset is 128MiB, you asking for %ld", imm26);
224 
225   // Patch with opcode | offset. The immediate must be masked to its low
    // 26 bits: for a backward (negative) displacement the sign-extension
    // bits of the 64-bit imm26 would otherwise OR into the opcode field
    // (bits 31:26) and corrupt the encoded B instruction.
226   *((int32_t*)code_pos) = 0x14000000 | (imm26 & 0x3ffffff);
227 
228   // Tell hardware to invalidate icache line containing code_pos
229   ICache::invalidate_range(code_pos, instruction_size);
230 }
231 
232 address NativeJump::jump_destination() const          {
233   address dest = MacroAssembler::target_addr_for_insn(instruction_address());
234 
235   // We use a jump to self as the unresolved address which the inline
236   // cache code (and relocs) know about.
237   // As a special case we also use the sequence movptr(r,0); br(r);
238   // i.e. a jump to 0 when we need to leave space for a wide immediate
239   // load.
240 
241   // Return -1 (the "unresolved" sentinel) if this is a jump to self or to 0.
242   if ((dest == (address)this) || dest == nullptr) {
243     dest = (address) -1;
244   }
245   return dest;
246 }
247 
248 void NativeJump::set_jump_destination(address dest) {
249   // We use jump to self as the unresolved address which the inline
250   // cache code (and relocs) know about
251   if (dest == (address) -1)
< prev index next >