src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp

 120   // locals[nlocals-1..0]
 121   // monitors[number_of_locks-1..0]
 122   //
 123   // Locals is a direct copy of the interpreter frame, so in the osr buffer
 124   // the first slot in the local array is the last local from the interpreter
 125   // and the last slot is local[0] (receiver) from the interpreter.
 126   //
 127   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 128   // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
 129   // in the interpreter frame (the method lock if a sync method).
 130 
 131   // Initialize monitors in the compiled activation.
 132   //   R3: pointer to osr buffer
 133   //
 134   // All other registers are dead at this point and the locals will be
 135   // copied into place by code emitted in the IR.
 136 
 137   Register OSR_buf = osrBufferPointer()->as_register();
 138   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 139     int monitor_offset = BytesPerWord * method()->max_locals() +
 140       (2 * BytesPerWord) * (number_of_locks - 1);
 141     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 142     // the OSR buffer using 2 word entries: first the lock and then
 143     // the oop.
 144     for (int i = 0; i < number_of_locks; i++) {
 145       int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 146 #ifdef ASSERT
 147       // Verify the interpreter's monitor has a non-null object.
 148       {
 149         Label L;
 150         __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
 151         __ cmpdi(CCR0, R0, 0);
 152         __ bne(CCR0, L);
 153         __ stop("locked object is NULL");
 154         __ bind(L);
 155       }
 156 #endif // ASSERT
 157       // Copy the lock field into the compiled activation.
 158       Address ml = frame_map()->address_for_monitor_lock(i),
 159               mo = frame_map()->address_for_monitor_object(i);
 160       assert(ml.index() == noreg && mo.index() == noreg, "sanity");
 161       __ ld(R0, slot_offset + 0, OSR_buf);
 162       __ std(R0, ml.disp(), ml.base());
 163       __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
 164       __ std(R0, mo.disp(), mo.base());
 165     }
 166   }
 167 }
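
(For orientation, a worked example of the monitor offset arithmetic in the pre-change code above. This is a standalone sketch, not part of the patch; BytesPerWord, the local count, and the lock count are assumed values.)

#include <cstdio>

// Old layout: each monitor occupies two words in the OSR buffer,
// the displaced lock word followed by the object oop.
int main() {
  const int BytesPerWord = 8;                    // assumed 64-bit target
  const int max_locals = 3, number_of_locks = 2; // hypothetical counts
  int monitor_offset = BytesPerWord * max_locals +
                       (2 * BytesPerWord) * (number_of_locks - 1);
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
    printf("monitor %d: lock word at +%d, oop at +%d\n",
           i, slot_offset, slot_offset + BytesPerWord);
  }
  // Prints +40/+48 for monitor 0 and +24/+32 for monitor 1: interpreter
  // lock 0 (the method lock for a sync method) ends up at the lowest
  // offset, mirroring the reversed locals layout described above.
  return 0;
}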
 168 
 169 
 170 int LIR_Assembler::emit_exception_handler() {
 171   // Generate code for the exception handler.
 172   address handler_base = __ start_a_stub(exception_handler_size());
 173 
 174   if (handler_base == NULL) {
 175     // Not enough space left for the handler.
 176     bailout("exception handler overflow");
 177     return -1;
 178   }
 179 
 180   int offset = code_offset();
 181   address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
 182   //__ load_const_optimized(R0, entry_point);
 183   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));

 197   _masm->block_comment("Unwind handler");
 198 
 199   int offset = code_offset();
 200   bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
 201   const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;
 202 
 203   // Fetch the exception from TLS and clear out exception related thread state.
 204   __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 205   __ li(R0, 0);
 206   __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 207   __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
 208 
 209   __ bind(_unwind_handler_entry);
 210   __ verify_not_null_oop(Rexception);
 211   if (preserve_exception) { __ mr(Rexception_save, Rexception); }
 212 
 213   // Perform needed unlocking
 214   MonitorExitStub* stub = NULL;
 215   if (method()->is_synchronized()) {
 216     monitor_address(0, FrameMap::R4_opr);
 217     stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
 218     __ unlock_object(R5, R6, R4, *stub->entry());
 219     __ bind(*stub->continuation());
 220   }
 221 
 222   if (compilation()->env()->dtrace_method_probes()) {
 223     Unimplemented();
 224   }
 225 
 226   // Dispatch to the unwind logic.
 227   address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
 228   //__ load_const_optimized(R0, unwind_stub);
 229   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
 230   if (preserve_exception) { __ mr(Rexception, Rexception_save); }
 231   __ mtctr(R0);
 232   __ bctr();
 233 
 234   // Emit the slow path assembly.
 235   if (stub != NULL) {
 236     stub->emit_code(this);
 237   }
 238 

2646   } else {
2647     __ sync();
2648   }
2649 }
2650 
2651 void LIR_Assembler::breakpoint() {
2652   __ illtrap();
2653 }
2654 
2655 
2656 void LIR_Assembler::push(LIR_Opr opr) {
2657   Unimplemented();
2658 }
2659 
2660 void LIR_Assembler::pop(LIR_Opr opr) {
2661   Unimplemented();
2662 }
2663 
2664 
2665 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2666   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2667   Register dst = dst_opr->as_register();
2668   Register reg = mon_addr.base();
2669   int offset = mon_addr.disp();
2670   // Compute pointer to BasicLock.
2671   __ add_const_optimized(dst, reg, offset);
2672 }
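
(monitor_address() reduces to a single base-plus-displacement add. A minimal sketch of what the emitted instruction computes; the helper name and example offset are illustrative, not from the patch.)

#include <cstdint>

// add_const_optimized(dst, reg, offset) materializes reg + offset in dst;
// for a monitor slot this is the slot's address within the current frame,
// a single 'addi' on PPC64 whenever the displacement fits in 16 bits.
static inline intptr_t monitor_slot_address(intptr_t frame_base, int disp) {
  return frame_base + disp; // e.g. R1_SP + 112 for a hypothetical slot
}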
2673 
2674 
2675 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2676   Register obj = op->obj_opr()->as_register();
2677   Register hdr = op->hdr_opr()->as_register();
2678   Register lock = op->lock_opr()->as_register();
2679 
2680   // Obj may not be an oop.
2681   if (op->code() == lir_lock) {
2682     MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2683     if (!UseHeavyMonitors) {
2684       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2685       // Add debug info for NullPointerException only if one is possible.
2686       if (op->info() != NULL) {
2687         if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2688           explicit_null_check(obj, op->info());
2689         } else {
2690           add_debug_info_for_null_check_here(op->info());
2691         }
2692       }
2693       __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2694     } else {
2695       // always do slow locking
 2696       // note: The slow locking code could be inlined here; however, if we use
2697       //       slow locking, speed doesn't matter anyway and this solution is
2698       //       simpler and requires less duplicated code - additionally, the
2699       //       slow locking code is the same in either case which simplifies
2700       //       debugging.
2701       if (op->info() != NULL) {
2702         add_debug_info_for_null_check_here(op->info());
2703         __ null_check(obj);
2704       }
2705       __ b(*op->stub()->entry());
 2706     }
 2707   } else {
2708     assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2709     if (!UseHeavyMonitors) {
2710       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2711       __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2712     } else {
2713       // always do slow unlocking
 2714       // note: The slow unlocking code could be inlined here; however, if we use
2715       //       slow unlocking, speed doesn't matter anyway and this solution is
2716       //       simpler and requires less duplicated code - additionally, the
2717       //       slow unlocking code is the same in either case which simplifies
2718       //       debugging.
2719       __ b(*op->stub()->entry());
2720     }
2721   }
2722   __ bind(*op->stub()->continuation());
2723 }
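
(The null-check selection in emit_lock() above is worth spelling out: an implicit check relies on the locking code's first load from the object faulting on a read-protected zero page, while an explicit check emits a compare-and-branch up front. A sketch of the decision; the predicate name is mine, not HotSpot's.)

// Mirrors the condition used above: fall back to an explicit null check
// when a NULL dereference would not reliably trap (zero page readable,
// or implicit null checks disabled).
static bool needs_explicit_null_check(bool zero_page_read_protected,
                                      bool implicit_null_checks) {
  return !zero_page_read_protected || !implicit_null_checks;
}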
2724 
2725 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2726   Register obj = op->obj()->as_pointer_register();
2727   Register result = op->result_opr()->as_pointer_register();
2728 
2729   CodeEmitInfo* info = op->info();
2730   if (info != NULL) {
2731     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2732       explicit_null_check(obj, info);
2733     } else {
2734       add_debug_info_for_null_check_here(info);
2735     }
2736   }
2737 
2738   if (UseCompressedClassPointers) {
2739     __ lwz(result, oopDesc::klass_offset_in_bytes(), obj);
2740     __ decode_klass_not_null(result);

 120   // locals[nlocals-1..0]
 121   // monitors[number_of_locks-1..0]
 122   //
 123   // Locals is a direct copy of the interpreter frame, so in the osr buffer
 124   // the first slot in the local array is the last local from the interpreter
 125   // and the last slot is local[0] (receiver) from the interpreter.
 126   //
 127   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 128   // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
 129   // in the interpreter frame (the method lock if a sync method).
 130 
 131   // Initialize monitors in the compiled activation.
 132   //   R3: pointer to osr buffer
 133   //
 134   // All other registers are dead at this point and the locals will be
 135   // copied into place by code emitted in the IR.
 136 
 137   Register OSR_buf = osrBufferPointer()->as_register();
 138   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 139     int monitor_offset = BytesPerWord * method()->max_locals() +
 140       BytesPerWord * (number_of_locks - 1);
 141     for (int i = 0; i < number_of_locks; i++) {
 142       int slot_offset = monitor_offset - (i * BytesPerWord);
 143 #ifdef ASSERT
 144       // Verify the interpreter's monitor has a non-null object.
 145       {
 146         Label L;
 147         __ ld(R0, slot_offset + 0, OSR_buf);
 148         __ cmpdi(CCR0, R0, 0);
 149         __ bne(CCR0, L);
 150         __ stop("locked object is NULL");
 151         __ bind(L);
 152       }
 153 #endif // ASSERT
 154       // Copy the locked object into the compiled activation.
 155       Address mo = frame_map()->address_for_monitor_object(i);
 156       assert(mo.index() == noreg, "sanity");
 157       __ ld(R0, slot_offset + 0, OSR_buf);
 158       __ std(R0, mo.disp(), mo.base());
 159     }
 160   }
 161 }
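
(The same worked example for the updated code above, which now packs one word per lock entry, the object oop only. Again a standalone sketch with assumed counts.)

#include <cstdio>

int main() {
  const int BytesPerWord = 8;                    // assumed 64-bit target
  const int max_locals = 3, number_of_locks = 2; // hypothetical counts
  int monitor_offset = BytesPerWord * max_locals +
                       BytesPerWord * (number_of_locks - 1);
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * BytesPerWord);
    printf("monitor %d: oop at +%d\n", i, slot_offset);
  }
  // Prints +32 and +24: with the displaced lock word gone, the buffer
  // shrinks from two words to one word per monitor.
  return 0;
}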
 162 
 163 
 164 int LIR_Assembler::emit_exception_handler() {
 165   // Generate code for the exception handler.
 166   address handler_base = __ start_a_stub(exception_handler_size());
 167 
 168   if (handler_base == NULL) {
 169     // Not enough space left for the handler.
 170     bailout("exception handler overflow");
 171     return -1;
 172   }
 173 
 174   int offset = code_offset();
 175   address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
 176   //__ load_const_optimized(R0, entry_point);
 177   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));

 191   _masm->block_comment("Unwind handler");
 192 
 193   int offset = code_offset();
 194   bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
 195   const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;
 196 
 197   // Fetch the exception from TLS and clear out exception related thread state.
 198   __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 199   __ li(R0, 0);
 200   __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 201   __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
 202 
 203   __ bind(_unwind_handler_entry);
 204   __ verify_not_null_oop(Rexception);
 205   if (preserve_exception) { __ mr(Rexception_save, Rexception); }
 206 
 207   // Perform needed unlocking
 208   MonitorExitStub* stub = NULL;
 209   if (method()->is_synchronized()) {
 210     monitor_address(0, FrameMap::R4_opr);
 211     __ ld(R4, BasicObjectLock::obj_offset_in_bytes(), R4);
 212     stub = new MonitorExitStub(FrameMap::R4_opr);
 213     __ b(*stub->entry());
 214     __ bind(*stub->continuation());
 215   }
 216 
 217   if (compilation()->env()->dtrace_method_probes()) {
 218     Unimplemented();
 219   }
 220 
 221   // Dispatch to the unwind logic.
 222   address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
 223   //__ load_const_optimized(R0, unwind_stub);
 224   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
 225   if (preserve_exception) { __ mr(Rexception, Rexception_save); }
 226   __ mtctr(R0);
 227   __ bctr();
 228 
 229   // Emit the slow path assembly.
 230   if (stub != NULL) {
 231     stub->emit_code(this);
 232   }
 233 
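
(In the updated unwind handler above, the object reference is loaded out of the monitor before branching into the exit stub, so MonitorExitStub now receives the oop itself rather than the lock address. A structural sketch; the two-field struct is a simplified stand-in for BasicObjectLock, whose real offset comes from BasicObjectLock::obj_offset_in_bytes().)

#include <cstdint>

// Simplified stand-in: a lock word followed by the object reference.
struct BasicObjectLockish {
  intptr_t lock;
  void*    obj;
};

// What 'ld R4, obj_offset, R4' accomplishes: replace the monitor address
// in R4 with the object to be unlocked before entering the slow path.
static void* object_to_unlock(const BasicObjectLockish* monitor) {
  return monitor->obj;
}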

2641   } else {
2642     __ sync();
2643   }
2644 }
2645 
2646 void LIR_Assembler::breakpoint() {
2647   __ illtrap();
2648 }
2649 
2650 
2651 void LIR_Assembler::push(LIR_Opr opr) {
2652   Unimplemented();
2653 }
2654 
2655 void LIR_Assembler::pop(LIR_Opr opr) {
2656   Unimplemented();
2657 }
2658 
2659 
2660 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2661   Address mon_addr = frame_map()->address_for_monitor_object(monitor_no);
2662   Register dst = dst_opr->as_register();
2663   Register reg = mon_addr.base();
2664   int offset = mon_addr.disp();
2665   // Compute pointer to BasicLock.
2666   __ add_const_optimized(dst, reg, offset);
2667 }
2668 
2669 
2670 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2671   Register obj = op->obj_opr()->as_register();
2672   Register hdr = op->hdr_opr()->as_register();
2673   Register lock = op->lock_opr()->as_register();
2674 
2675   // Obj may not be an oop.
2676   if (op->code() == lir_lock) {
2677     MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2678     // always do slow locking
 2679     // note: The slow locking code could be inlined here; however, if we use
2680     //       slow locking, speed doesn't matter anyway and this solution is
2681     //       simpler and requires less duplicated code - additionally, the
2682     //       slow locking code is the same in either case which simplifies
2683     //       debugging.
2684     if (op->info() != NULL) {
2685       add_debug_info_for_null_check_here(op->info());
 2686       __ null_check(obj);
 2687     }
2688     __ b(*op->stub()->entry());
2689   } else {
2690     assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2691     // always do slow unlocking
 2692     // note: The slow unlocking code could be inlined here; however, if we use
2693     //       slow unlocking, speed doesn't matter anyway and this solution is
2694     //       simpler and requires less duplicated code - additionally, the
2695     //       slow unlocking code is the same in either case which simplifies
2696     //       debugging.
 2697     __ b(*op->stub()->entry());
 2698   }
2699   __ bind(*op->stub()->continuation());
2700 }
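
(After the change, both lir_lock and lir_unlock branch unconditionally into the runtime stub and resume at its continuation label; the UseHeavyMonitors-guarded inline fast path is gone. A toy predicate capturing the contrast; names are illustrative, not HotSpot API.)

// Old code chose between an inline fast path and the runtime stub based
// on UseHeavyMonitors; new code takes the stub unconditionally.
enum class LockPath { InlineFast, RuntimeStub };

static LockPath choose_lock_path(bool old_code, bool use_heavy_monitors) {
  if (old_code && !use_heavy_monitors) {
    return LockPath::InlineFast; // old: __ lock_object(...) / unlock_object
  }
  return LockPath::RuntimeStub;  // new: always __ b(*op->stub()->entry())
}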
2701 
2702 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2703   Register obj = op->obj()->as_pointer_register();
2704   Register result = op->result_opr()->as_pointer_register();
2705 
2706   CodeEmitInfo* info = op->info();
2707   if (info != NULL) {
2708     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2709       explicit_null_check(obj, info);
2710     } else {
2711       add_debug_info_for_null_check_here(info);
2712     }
2713   }
2714 
2715   if (UseCompressedClassPointers) {
2716     __ lwz(result, oopDesc::klass_offset_in_bytes(), obj);
2717     __ decode_klass_not_null(result);