src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
// Generate the Verified Entry Point (VEP).
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();
- if (UseRTMLocking) {
- // Abort RTM transaction before calling JNI
- // because critical section can be large and
- // abort anyway. Also nmethod can be deoptimized.
- __ tabort_();
- }
-
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
Label L_skip_barrier;
Register klass = r_temp_1;
// Notify OOP recorder (don't need the relocation)
AddressLiteral md = __ constant_metadata_address(method->method_holder());
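The barrier above lets the verified entry point fall through once the holder class is initialized. A minimal standalone model of the check it emits (hypothetical types and names, not the HotSpot API; the real check is emitted as PPC assembly against the klass's init state):

    #include <cstdio>

    // Hypothetical model, not the HotSpot API: the barrier is skipped when
    // the holder class is fully initialized, or when the current thread is
    // the one running its <clinit> (the reentrant case).
    enum InitState { being_initialized, fully_initialized };

    struct KlassModel {
      InitState init_state;
      int       init_thread_id;  // thread currently running <clinit>, if any
    };

    static bool needs_clinit_barrier(const KlassModel* k, int current_thread_id) {
      if (k->init_state == fully_initialized)     return false;  // L_skip_barrier
      if (k->init_thread_id == current_thread_id) return false;  // reentrant init
      return true;  // slow path: call into the runtime and wait
    }

    int main() {
      KlassModel k{being_initialized, /*init_thread_id=*/7};
      std::printf("thread 7 needs barrier: %d\n", needs_clinit_barrier(&k, 7));  // 0
      std::printf("thread 9 needs barrier: %d\n", needs_clinit_barrier(&k, 9));  // 1
      return 0;
    }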
// Get the lock box slot's address.
__ addi(r_box, R1_SP, lock_offset);
// Try fastpath for locking.
- // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
- __ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+ if (LockingMode == LM_LIGHTWEIGHT) {
+   // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
+   __ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_temp_1, r_temp_2, r_temp_3);
+ } else {
+   // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
+   __ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+ }
__ beq(CCR0, locked);
// None of the above fast optimizations worked so we have to get into the
// slow case of monitor enter. Inline a special case of call_VM that
// disallows any pending_exception.
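The added hunk dispatches on LockingMode: the LM_LIGHTWEIGHT fast path operates on the object header alone and, as the new call shows, no longer takes the on-stack lock box in r_box, while the legacy stack-locking path still does. A standalone sketch of that dispatch (hypothetical types and stub functions, not the HotSpot emitters; the LM_* values mirror HotSpot's LockingMode flag):

    #include <cstdio>

    // Hypothetical stand-ins: the real code emits PPC assembly via
    // MacroAssembler rather than calling C++ functions like these.
    enum { LM_MONITOR = 0, LM_LEGACY = 1, LM_LIGHTWEIGHT = 2 };

    struct Obj { long header; };
    struct Box { long displaced_header; };  // on-stack lock box, legacy path only

    static bool fast_lock_lightweight(Obj* oop) {
      // Lightweight locking: CAS on the object header, no lock box needed.
      std::printf("lightweight lock on %p\n", (void*)oop);
      return true;
    }

    static bool fast_lock_legacy(Obj* oop, Box* box) {
      // Stack locking: the displaced header goes into the on-stack box.
      std::printf("legacy lock on %p via box %p\n", (void*)oop, (void*)box);
      return true;
    }

    static bool try_fast_lock(int locking_mode, Obj* oop, Box* box) {
      if (locking_mode == LM_LIGHTWEIGHT) {
        return fast_lock_lightweight(oop);  // box unused, like the dropped r_box
      } else {
        return fast_lock_legacy(oop, box);
      }
    }

    int main() {
      Obj o{0};
      Box b{0};
      if (!try_fast_lock(LM_LIGHTWEIGHT, &o, &b)) {
        // slow path would call into the VM (monitorenter)
      }
      return 0;
    }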
__ ld(r_oop, receiver_offset, R1_SP);
}
__ addi(r_box, R1_SP, lock_offset);
// Try fastpath for unlocking.
- __ compiler_fast_unlock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+ if (LockingMode == LM_LIGHTWEIGHT) {
+   __ compiler_fast_unlock_lightweight_object(CCR0, r_oop, r_temp_1, r_temp_2, r_temp_3);
+ } else {
+   __ compiler_fast_unlock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+ }
__ beq(CCR0, done);
// Save and restore any potential method result value around the unlocking operation.
save_native_result(masm, ret_type, workspace_slot_offset);
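The unlock side keeps the symmetric dispatch, and the comment above flags a subtlety: the native method's result is sitting in the return registers, which a slow-path VM call would clobber, so it is spilled to a frame slot first and reloaded afterwards (the matching restore appears after the call in the full file). A toy model of that spill/reload, with hypothetical names:

    #include <cstdio>

    // Toy model of save_native_result()/restore_native_result(): the native
    // call's result lives in the return register, a VM call for the slow-path
    // unlock may clobber it, so it is spilled to a frame slot first.
    struct FrameModel { long result_slot; };

    static long g_return_register;  // stand-in for the return register (hypothetical)

    static void save_native_result(FrameModel* f)    { f->result_slot = g_return_register; }
    static void restore_native_result(FrameModel* f) { g_return_register = f->result_slot; }

    static void slow_path_unlock() {
      g_return_register = -1;  // a runtime call is free to clobber the register
    }

    int main() {
      FrameModel frame{0};
      g_return_register = 42;       // value the native method returned
      save_native_result(&frame);   // spill around the unlocking operation
      slow_path_unlock();
      restore_native_result(&frame);
      std::printf("result = %ld\n", g_return_register);  // prints 42
      return 0;
    }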
// Setup code generation tools.
CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
address start = __ pc();
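For orientation, the three setup lines above allocate a code buffer (here 2048 bytes of code space and 1024 bytes of relocation info), wrap it in an assembler that emits into it, and record the current emit position as the blob's start. A standalone model of that pattern (hypothetical types, not the HotSpot CodeBuffer/MacroAssembler API):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-ins for CodeBuffer/MacroAssembler: the assembler
    // appends encoded instructions to the buffer and pc() is the address
    // where the next instruction will be emitted.
    struct CodeBufferModel {
      std::vector<uint8_t> code;
      explicit CodeBufferModel(std::size_t capacity) { code.reserve(capacity); }
    };

    struct AssemblerModel {
      CodeBufferModel* buf;
      explicit AssemblerModel(CodeBufferModel* b) : buf(b) {}
      const uint8_t* pc() const { return buf->code.data() + buf->code.size(); }
      void emit_byte(uint8_t b) { buf->code.push_back(b); }
    };

    int main() {
      CodeBufferModel buffer(2048);      // cf. CodeBuffer("uncommon_trap_blob", 2048, 1024)
      AssemblerModel masm(&buffer);
      const uint8_t* start = masm.pc();  // entry point of the blob
      masm.emit_byte(0x60);              // first byte of a PPC nop (0x60000000)
      return (masm.pc() == start + 1) ? 0 : 1;
    }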
- if (UseRTMLocking) {
- // Abort RTM transaction before possible nmethod deoptimization.
- __ tabort_();
- }
-
Register unroll_block_reg = R21_tmp1;
Register klass_index_reg = R22_tmp2;
Register unc_trap_reg = R23_tmp3;
Register r_return_pc = R27_tmp7;
} else {
// Use thread()->saved_exception_pc() as return pc.
return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
}
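This branch selects where the register saver will find the frame's return pc: at a poll on a return instruction it is in the link register, while for the case above the VM has stashed it in the thread. A minimal sketch of that selection; only return_pc_is_thread_saved_exception_pc is visible in the diff, the other name is an assumption:

    #include <cstdio>

    // Minimal sketch; names other than return_pc_is_thread_saved_exception_pc
    // (visible above) are assumptions, not the RegisterSaver API.
    enum ReturnPcLocation {
      return_pc_is_lr,                        // poll at a return: pc is in the link register
      return_pc_is_thread_saved_exception_pc  // poll at a loop/exception: VM stashed the pc
    };

    static long frame_return_pc(ReturnPcLocation loc, long lr, long saved_exception_pc) {
      return (loc == return_pc_is_lr) ? lr : saved_exception_pc;
    }

    int main() {
      // For the configuration chosen above, the thread-saved pc wins.
      std::printf("0x%lx\n", frame_return_pc(return_pc_is_thread_saved_exception_pc,
                                             0x1000L, 0x2000L));  // prints 0x2000
      return 0;
    }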
- if (UseRTMLocking) {
- // Abort RTM transaction before calling runtime
- // because critical section can be large and so
- // will abort anyway. Also nmethod can be deoptimized.
- __ tabort_();
- }
-
bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
// Save registers, fpu state, and flags. Set R31 = return pc.
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,