< prev index next >

src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp

Print this page

 132   assert(base.index() == noreg, "must be");
 133   if (base.disp() + BytesPerWord >= 4096) { BAILOUT_("offset not in range", Address(base.base(),0)); }
 134   return Address(base.base(), base.disp() + BytesPerWord);
 135 }
 136 
 137 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
       // The low word of a two-word operand lives at the base displacement, so
       // the unmodified address is returned (the companion accessor just above
       // returns disp + BytesPerWord for the other word).
 138   return as_Address(addr);
 139 }
 140 
 141 
 142 void LIR_Assembler::osr_entry() {
       // Record the code offset of the OSR (on-stack-replacement) entry point.
 143   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 144   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 145   ValueStack* entry_state = osr_entry->end()->state();
 146   int number_of_locks = entry_state->locks_size();
 147 
 148   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
       // OSR_buf points at the interpreter-built buffer: max_locals() local
       // slots followed by the monitors, two words (lock + object) each.
 149   Register OSR_buf = osrBufferPointer()->as_pointer_register();
 150 
 151   assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
       // Byte offset of the last monitor's first word; the loop walks the
       // monitors from the highest buffer offset downwards as i increases.
 152   int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord;
 153   for (int i = 0; i < number_of_locks; i++) {
 154     int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
         // Copy the lock word and the object word of monitor i from the OSR
         // buffer into the corresponding slots of the compiled frame.
 155     __ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
 156     __ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
 157     __ str(R1, frame_map()->address_for_monitor_lock(i));
 158     __ str(R2, frame_map()->address_for_monitor_object(i));
 159   }
 160 }
 161 
 162 
 163 int LIR_Assembler::check_icache() {
 164   Register receiver = LIR_Assembler::receiverOpr()->as_register();
 165   int offset = __ offset();
       // Emit the inline-cache check against the expected klass held in
       // Ricklass; the compare/miss sequence itself is produced by the macro
       // assembler helper.
 166   __ inline_cache_check(receiver, Ricklass);
       // Return the code offset at which the check starts.
 167   return offset;
 168 }
 169 
 170 void LIR_Assembler::clinit_barrier(ciMethod* method) {
       // Class-initialization barriers are not implemented in this port;
       // reaching here is a bug.
 171   ShouldNotReachHere(); // not implemented
 172 }
 173 
 174 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
 175   jobject o = (jobject)Universe::non_oop_word();
 176   int index = __ oop_recorder()->allocate_oop_index(o);
 177 
 178   PatchingStub* patch = new PatchingStub(_masm, patching_id(info), index);

 226   if (CommentedAssembly) {
 227     _masm->block_comment("Unwind handler");
 228   }
 229 #endif
 230 
 231   int offset = code_offset();
 232 
 233   // Fetch the exception from TLS and clear out exception related thread state
 234   Register zero = __ zero_register(Rtemp);
 235   __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
 236   __ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
 237   __ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));
 238 
 239   __ bind(_unwind_handler_entry);
 240   __ verify_not_null_oop(Rexception_obj);
 241 
 242   // Perform needed unlocking
 243   MonitorExitStub* stub = NULL;
 244   if (method()->is_synchronized()) {
 245     monitor_address(0, FrameMap::R0_opr);
 246     stub = new MonitorExitStub(FrameMap::R0_opr, true, 0);
 247     __ unlock_object(R2, R1, R0, *stub->entry());

 248     __ bind(*stub->continuation());
 249   }
 250 
 251   // remove the activation and dispatch to the unwind handler
 252   __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
 253   __ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
 254 
 255   // Emit the slow path assembly
 256   if (stub != NULL) {
 257     stub->emit_code(this);
 258   }
 259 
 260   return offset;
 261 }
 262 
 263 
 264 int LIR_Assembler::emit_deopt_handler() {
 265   address handler_base = __ start_a_stub(deopt_handler_size());
 266   if (handler_base == NULL) {
 267     bailout("deopt handler overflow");

 954                      op->tmp2()->as_register(),
 955                      op->tmp3()->as_register(),
 956                      op->header_size(),
 957                      op->object_size(),
 958                      op->klass()->as_register(),
 959                      *op->stub()->entry());
 960   __ bind(*op->stub()->continuation());
 961 }
 962 
 963 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
       // Branch straight to the runtime stub when fast inline allocation is
       // globally disabled, or disabled for this element kind (object arrays
       // and primitive arrays are gated by separate flags).
 964   if (UseSlowPath ||
 965       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
 966       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
 967     __ b(*op->stub()->entry());
 968   } else {
       // Inline fast-path allocation; the stub entry serves as the slow path
       // taken when the inline attempt fails.
 969     __ allocate_array(op->obj()->as_register(),
 970                       op->len()->as_register(),
 971                       op->tmp1()->as_register(),
 972                       op->tmp2()->as_register(),
 973                       op->tmp3()->as_register(),
 974                       arrayOopDesc::header_size(op->type()),
 975                       type2aelembytes(op->type()),
 976                       op->klass()->as_register(),
 977                       *op->stub()->entry());
 978   }
       // Both paths rejoin here once the array exists.
 979   __ bind(*op->stub()->continuation());
 980 }
 981 
 982 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
 983                                         ciMethodData *md, ciProfileData *data,
 984                                         Register recv, Register tmp1, Label* update_done) {
 985   assert_different_registers(mdo, recv, tmp1);
 986   uint i;
 987   for (i = 0; i < VirtualCallData::row_limit(); i++) {
 988     Label next_test;
 989     // See if the receiver is receiver[n].
 990     Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
 991                           mdo_offset_bias);
 992     __ ldr(tmp1, receiver_addr);
 993     __ verify_klass_ptr(tmp1);
 994     __ cmp(recv, tmp1);

2414   }
2415   if (op->halt()) {
2416     const char* str = __ code_string(op->msg());
2417     __ stop(str);
2418   } else {
2419     breakpoint();
2420   }
2421   __ bind(ok);
2422 }
2423 #endif // ASSERT
2424 
2425 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
       // The CRC32 intrinsic is never selected for this platform; emitting it
       // indicates a compiler bug.
2426   fatal("CRC32 intrinsic is not implemented on this platform");
2427 }
2428 
2429 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2430   Register obj = op->obj_opr()->as_pointer_register();
2431   Register hdr = op->hdr_opr()->as_pointer_register();
2432   Register lock = op->lock_opr()->as_pointer_register();
2433 
2434   if (UseHeavyMonitors) {
       // No fast path: null-check the object explicitly (the fast path would
       // otherwise fault-check it implicitly) and go straight to the stub.
2435     if (op->info() != NULL) {
2436       add_debug_info_for_null_check_here(op->info());
2437       __ null_check(obj);
2438     }
2439     __ b(*op->stub()->entry());
2440   } else if (op->code() == lir_lock) {
2441     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
       // Inline fast lock; the returned offset marks the instruction that can
       // fault on a null object, so the debug info is attached there.
2442     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
2443     if (op->info() != NULL) {
2444       add_debug_info_for_null_check(null_check_offset, op->info());
2445     }
2446   } else if (op->code() == lir_unlock) {
2447     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2448   } else {
2449     ShouldNotReachHere();
2450   }
       // The slow-path stub returns here.
2451   __ bind(*op->stub()->continuation());
2452 }
2453 
2454 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2455   Register obj = op->obj()->as_pointer_register();
2456   Register result = op->result_opr()->as_pointer_register();
2457 
2458   CodeEmitInfo* info = op->info();
2459   if (info != NULL) {
       // The load below may fault on a null obj; record debug info for the
       // implicit null check at the current pc.
2460     add_debug_info_for_null_check_here(info);
2461   }
2462 
       // Load the klass word from the object header: a 32-bit narrow value
       // when compressed class pointers are enabled, a full word otherwise.
2463   if (UseCompressedClassPointers) { // On 32 bit arm??
2464     __ ldr_u32(result, Address(obj, oopDesc::klass_offset_in_bytes()));
2465   } else {
2466     __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
2467   }
2468 }
2469 
2470 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {

2557       __ bind(update_done);
2558     }
2559   } else {
2560     // Static call
2561     __ ldr(tmp1, counter_addr);
2562     __ add(tmp1, tmp1, DataLayout::counter_increment);
2563     __ str(tmp1, counter_addr);
2564   }
2565 }
2566 
2567 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
       // Type profiling is not wired up for this port; the LIR generator must
       // never emit this op here.
2568   fatal("Type profiling not implemented on this platform");
2569 }
2570 
2571 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
       // Branch delay slots do not exist on this architecture.
2572   Unimplemented();
2573 }
2574 
2575 
2576 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
       // Materialize the stack address of monitor slot `monitor_no` (its lock
       // word) into dst. add_slow is used presumably because the displacement
       // may not fit a single add-immediate — confirm against add_slow's impl.
2577   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2578   __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp());
2579 }
2580 
2581 
2582 void LIR_Assembler::align_backward_branch_target() {
 2583   // Some ARM processors do better with 8-byte branch target alignment
       // (typically loop headers); pad the code buffer to an 8-byte boundary.
 2584   __ align(8);
 2585 }
2586 
2587 
2588 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2589   // tmp must be unused
2590   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2591 
2592   if (left->is_single_cpu()) {
2593     assert (dest->type() == T_INT, "unexpected result type");
2594     assert (left->type() == T_INT, "unexpected left type");
2595     __ neg_32(dest->as_register(), left->as_register());
2596   } else if (left->is_double_cpu()) {
2597     Register dest_lo = dest->as_register_lo();

 132   assert(base.index() == noreg, "must be");
 133   if (base.disp() + BytesPerWord >= 4096) { BAILOUT_("offset not in range", Address(base.base(),0)); }
 134   return Address(base.base(), base.disp() + BytesPerWord);
 135 }
 136 
 137 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
       // The low word of a two-word operand lives at the base displacement, so
       // the unmodified address is returned (the companion accessor just above
       // returns disp + BytesPerWord for the other word).
 138   return as_Address(addr);
 139 }
 140 
 141 
 142 void LIR_Assembler::osr_entry() {
       // Record the code offset of the OSR (on-stack-replacement) entry point.
 143   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 144   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 145   ValueStack* entry_state = osr_entry->end()->state();
 146   int number_of_locks = entry_state->locks_size();
 147 
 148   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
       // OSR_buf points at the interpreter-built buffer: max_locals() local
       // slots followed by the monitors, one word per monitor (the monitor
       // offset formula below implies a single-word monitor slot).
 149   Register OSR_buf = osrBufferPointer()->as_pointer_register();
 150 
 151   assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
       // Byte offset of the last monitor's slot; the loop walks the monitors
       // from the highest buffer offset downwards as i increases.
 152   int monitor_offset = (method()->max_locals() + (number_of_locks - 1)) * BytesPerWord;
 153   for (int i = 0; i < number_of_locks; i++) {
 154     int slot_offset = monitor_offset - (i * BytesPerWord);
         // Copy only the locked-object word into the compiled frame's monitor
         // slot; no displaced-header word is transferred here.
 155     __ ldr(R1, Address(OSR_buf, slot_offset));
 156     __ str(R1, frame_map()->address_for_monitor_object(i));


 157   }
 158 }
 159 
 160 
 161 int LIR_Assembler::check_icache() {
 162   Register receiver = LIR_Assembler::receiverOpr()->as_register();
 163   int offset = __ offset();
       // Emit the inline-cache check against the expected klass held in
       // Ricklass; the compare/miss sequence itself is produced by the macro
       // assembler helper.
 164   __ inline_cache_check(receiver, Ricklass);
       // Return the code offset at which the check starts.
 165   return offset;
 166 }
 167 
 168 void LIR_Assembler::clinit_barrier(ciMethod* method) {
       // Class-initialization barriers are not implemented in this port;
       // reaching here is a bug.
 169   ShouldNotReachHere(); // not implemented
 170 }
 171 
 172 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
 173   jobject o = (jobject)Universe::non_oop_word();
 174   int index = __ oop_recorder()->allocate_oop_index(o);
 175 
 176   PatchingStub* patch = new PatchingStub(_masm, patching_id(info), index);

 224   if (CommentedAssembly) {
 225     _masm->block_comment("Unwind handler");
 226   }
 227 #endif
 228 
 229   int offset = code_offset();
 230 
 231   // Fetch the exception from TLS and clear out exception related thread state
 232   Register zero = __ zero_register(Rtemp);
 233   __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
 234   __ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
 235   __ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));
 236 
 237   __ bind(_unwind_handler_entry);
 238   __ verify_not_null_oop(Rexception_obj);
 239 
 240   // Perform needed unlocking
 241   MonitorExitStub* stub = NULL;
 242   if (method()->is_synchronized()) {
 243     monitor_address(0, FrameMap::R0_opr);
 244     __ ldr(R1, Address(R0, BasicObjectLock::obj_offset_in_bytes()));
 245     stub = new MonitorExitStub(FrameMap::R1_opr);
 246     __ b(*stub->entry());
 247     __ bind(*stub->continuation());
 248   }
 249 
 250   // remove the activation and dispatch to the unwind handler
 251   __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
 252   __ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
 253 
 254   // Emit the slow path assembly
 255   if (stub != NULL) {
 256     stub->emit_code(this);
 257   }
 258 
 259   return offset;
 260 }
 261 
 262 
 263 int LIR_Assembler::emit_deopt_handler() {
 264   address handler_base = __ start_a_stub(deopt_handler_size());
 265   if (handler_base == NULL) {
 266     bailout("deopt handler overflow");

 953                      op->tmp2()->as_register(),
 954                      op->tmp3()->as_register(),
 955                      op->header_size(),
 956                      op->object_size(),
 957                      op->klass()->as_register(),
 958                      *op->stub()->entry());
 959   __ bind(*op->stub()->continuation());
 960 }
 961 
 962 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
       // Branch straight to the runtime stub when fast inline allocation is
       // globally disabled, or disabled for this element kind (object arrays
       // and primitive arrays are gated by separate flags).
 963   if (UseSlowPath ||
 964       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
 965       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
 966     __ b(*op->stub()->entry());
 967   } else {
       // Inline fast-path allocation; the stub entry serves as the slow path
       // taken when the inline attempt fails.
 968     __ allocate_array(op->obj()->as_register(),
 969                       op->len()->as_register(),
 970                       op->tmp1()->as_register(),
 971                       op->tmp2()->as_register(),
 972                       op->tmp3()->as_register(),
 973                       arrayOopDesc::base_offset_in_bytes(op->type()),
 974                       type2aelembytes(op->type()),
 975                       op->klass()->as_register(),
 976                       *op->stub()->entry());
 977   }
       // Both paths rejoin here once the array exists.
 978   __ bind(*op->stub()->continuation());
 979 }
 980 
 981 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
 982                                         ciMethodData *md, ciProfileData *data,
 983                                         Register recv, Register tmp1, Label* update_done) {
 984   assert_different_registers(mdo, recv, tmp1);
 985   uint i;
 986   for (i = 0; i < VirtualCallData::row_limit(); i++) {
 987     Label next_test;
 988     // See if the receiver is receiver[n].
 989     Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
 990                           mdo_offset_bias);
 991     __ ldr(tmp1, receiver_addr);
 992     __ verify_klass_ptr(tmp1);
 993     __ cmp(recv, tmp1);

2413   }
2414   if (op->halt()) {
2415     const char* str = __ code_string(op->msg());
2416     __ stop(str);
2417   } else {
2418     breakpoint();
2419   }
2420   __ bind(ok);
2421 }
2422 #endif // ASSERT
2423 
2424 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
       // The CRC32 intrinsic is never selected for this platform; emitting it
       // indicates a compiler bug.
2425   fatal("CRC32 intrinsic is not implemented on this platform");
2426 }
2427 
2428 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2429   Register obj = op->obj_opr()->as_pointer_register();
2430   Register hdr = op->hdr_opr()->as_pointer_register();
2431   Register lock = op->lock_opr()->as_pointer_register();
2432 
2433   // TODO: Implement fast-locking.
       // Both lir_lock and lir_unlock currently always take the runtime stub
       // slow path. NOTE(review): obj, hdr and lock are computed but unused
       // until fast-locking is implemented.
2434   __ b(*op->stub()->entry());














       // The slow-path stub returns here.
2435   __ bind(*op->stub()->continuation());
2436 }
2437 
2438 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2439   Register obj = op->obj()->as_pointer_register();
2440   Register result = op->result_opr()->as_pointer_register();
2441 
2442   CodeEmitInfo* info = op->info();
2443   if (info != NULL) {
       // The load below may fault on a null obj; record debug info for the
       // implicit null check at the current pc.
2444     add_debug_info_for_null_check_here(info);
2445   }
2446 
       // Load the klass word from the object header: a 32-bit narrow value
       // when compressed class pointers are enabled, a full word otherwise.
2447   if (UseCompressedClassPointers) { // On 32 bit arm??
2448     __ ldr_u32(result, Address(obj, oopDesc::klass_offset_in_bytes()));
2449   } else {
2450     __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
2451   }
2452 }
2453 
2454 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {

2541       __ bind(update_done);
2542     }
2543   } else {
2544     // Static call
2545     __ ldr(tmp1, counter_addr);
2546     __ add(tmp1, tmp1, DataLayout::counter_increment);
2547     __ str(tmp1, counter_addr);
2548   }
2549 }
2550 
2551 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
       // Type profiling is not wired up for this port; the LIR generator must
       // never emit this op here.
2552   fatal("Type profiling not implemented on this platform");
2553 }
2554 
2555 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
       // Branch delay slots do not exist on this architecture.
2556   Unimplemented();
2557 }
2558 
2559 
2560 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
       // Materialize the stack address of monitor slot `monitor_no` (its
       // object word) into dst. add_slow is used presumably because the
       // displacement may not fit a single add-immediate — confirm against
       // add_slow's implementation.
2561   Address mon_addr = frame_map()->address_for_monitor_object(monitor_no);
2562   __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp());
2563 }
2564 
2565 
2566 void LIR_Assembler::align_backward_branch_target() {
 2567   // Some ARM processors do better with 8-byte branch target alignment
       // (typically loop headers); pad the code buffer to an 8-byte boundary.
 2568   __ align(8);
 2569 }
2570 
2571 
2572 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2573   // tmp must be unused
2574   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2575 
2576   if (left->is_single_cpu()) {
2577     assert (dest->type() == T_INT, "unexpected result type");
2578     assert (left->type() == T_INT, "unexpected left type");
2579     __ neg_32(dest->as_register(), left->as_register());
2580   } else if (left->is_double_cpu()) {
2581     Register dest_lo = dest->as_register_lo();
< prev index next >