< prev index next >

src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp

Print this page




1257                      relocInfo::runtime_call_type);
1258     __ BIND(valid);
1259   }
1260 
1261   // Argument is valid and klass is as expected, continue.
1262 
1263   // Extract method from inline cache, verified entry point needs it.
1264   __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
1265   assert(R19_method == ic, "the inline cache register is dead here");
1266 
1267   __ ld(code, method_(code));
1268   __ cmpdi(CCR0, code, 0);
1269   __ ld(ientry, method_(interpreter_entry)); // preloaded
1270   __ beq_predict_taken(CCR0, call_interpreter);
1271 
1272   // Branch to ic_miss_stub.
1273   __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
1274 
1275   // entry: c2i
1276 
1277   c2i_entry = __ pc();
1278 
1279   // Class initialization barrier for static methods
1280   if (VM_Version::supports_fast_class_init_checks()) {
1281     Label L_skip_barrier;
1282 
1283     { // Bypass the barrier for non-static methods
1284       __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
1285       __ andi_(R0, R0, JVM_ACC_STATIC);
1286       __ beq(CCR0, L_skip_barrier); // non-static
1287     }
1288 
1289     Register klass = R11_scratch1;
1290     __ load_method_holder(klass, R19_method);
1291     __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
1292 
1293     __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
1294     __ mtctr(klass);
1295     __ bctr();
1296 
1297     __ bind(L_skip_barrier);
1298   }
1299 
1300   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);
1301 
1302   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
1303 }
1304 
1305 #ifdef COMPILER2
1306 // An oop arg. Must pass a handle not the oop itself.
1307 static void object_move(MacroAssembler* masm,
1308                         int frame_size_in_slots,
1309                         OopMap* oop_map, int oop_handle_offset,
1310                         bool is_receiver, int* receiver_offset,
1311                         VMRegPair src, VMRegPair dst,
1312                         Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
1313   assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
1314          "receiver has already been moved");
1315 
1316   // We must pass a handle. First figure out the location we use as a handle.
1317 
1318   if (src.first()->is_stack()) {
1319     // stack to stack or reg
1320 


2110 
2111   __ cmpdi(CCR0, R3_ARG1, 0);
2112   __ beq(CCR0, ic_miss);
2113   __ verify_oop(R3_ARG1);
2114   __ load_klass(receiver_klass, R3_ARG1);
2115 
2116   __ cmpd(CCR0, receiver_klass, ic);
2117   __ bne(CCR0, ic_miss);
2118   }
2119 
2120 
2121   // Generate the Verified Entry Point (VEP).
2122   // --------------------------------------------------------------------------
2123   vep_start_pc = (intptr_t)__ pc();
2124 
2125   if (UseRTMLocking) {
2126     // Abort RTM transaction before calling JNI
2127     // because critical section can be large and
2128     // abort anyway. Also nmethod can be deoptimized.
2129     __ tabort_();
2130   }
2131 
2132   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
2133     Label L_skip_barrier;
2134     Register klass = r_temp_1;
2135     // Notify OOP recorder (don't need the relocation)
2136     AddressLiteral md = __ constant_metadata_address(method->method_holder());
2137     __ load_const_optimized(klass, md.value(), R0);
2138     __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
2139 
2140     __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
2141     __ mtctr(klass);
2142     __ bctr();
2143 
2144     __ bind(L_skip_barrier);
2145   }
2146 
2147   __ save_LR_CR(r_temp_1);
2148   __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
2149   __ mr(r_callers_sp, R1_SP);                            // Remember frame pointer.
2150   __ push_frame(frame_size_in_bytes, r_temp_1);          // Push the c2n adapter's frame.
2151   frame_done_pc = (intptr_t)__ pc();
2152 
2153   __ verify_thread();
2154 
2155   // Native nmethod wrappers never take possession of the oop arguments.
2156   // So the caller will gc the arguments.
2157   // The only thing we need an oopMap for is if the call is static.
2158   //
2159   // An OopMap for lock (and class if static), and one for the VM call itself.
2160   OopMapSet *oop_maps = new OopMapSet();
2161   OopMap    *oop_map  = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2162 
2163   if (is_critical_native) {
2164     check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset,




1257                      relocInfo::runtime_call_type);
1258     __ BIND(valid);
1259   }
1260 
1261   // Argument is valid and klass is as expected, continue.
1262 
1263   // Extract method from inline cache, verified entry point needs it.
1264   __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
1265   assert(R19_method == ic, "the inline cache register is dead here");
1266 
1267   __ ld(code, method_(code));
1268   __ cmpdi(CCR0, code, 0);
1269   __ ld(ientry, method_(interpreter_entry)); // preloaded
1270   __ beq_predict_taken(CCR0, call_interpreter);
1271 
1272   // Branch to ic_miss_stub.
1273   __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
1274 
1275   // entry: c2i
1276 
1277   c2i_entry = gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);























1278 
1279   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
1280 }
1281 
1282 #ifdef COMPILER2
1283 // An oop arg. Must pass a handle not the oop itself.
1284 static void object_move(MacroAssembler* masm,
1285                         int frame_size_in_slots,
1286                         OopMap* oop_map, int oop_handle_offset,
1287                         bool is_receiver, int* receiver_offset,
1288                         VMRegPair src, VMRegPair dst,
1289                         Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
1290   assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
1291          "receiver has already been moved");
1292 
1293   // We must pass a handle. First figure out the location we use as a handle.
1294 
1295   if (src.first()->is_stack()) {
1296     // stack to stack or reg
1297 


2087 
2088   __ cmpdi(CCR0, R3_ARG1, 0);
2089   __ beq(CCR0, ic_miss);
2090   __ verify_oop(R3_ARG1);
2091   __ load_klass(receiver_klass, R3_ARG1);
2092 
2093   __ cmpd(CCR0, receiver_klass, ic);
2094   __ bne(CCR0, ic_miss);
2095   }
2096 
2097 
2098   // Generate the Verified Entry Point (VEP).
2099   // --------------------------------------------------------------------------
2100   vep_start_pc = (intptr_t)__ pc();
2101 
2102   if (UseRTMLocking) {
2103     // Abort RTM transaction before calling JNI
2104     // because critical section can be large and
2105     // abort anyway. Also nmethod can be deoptimized.
2106     __ tabort_();















2107   }
2108 
2109   __ save_LR_CR(r_temp_1);
2110   __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
2111   __ mr(r_callers_sp, R1_SP);                            // Remember frame pointer.
2112   __ push_frame(frame_size_in_bytes, r_temp_1);          // Push the c2n adapter's frame.
2113   frame_done_pc = (intptr_t)__ pc();
2114 
2115   __ verify_thread();
2116 
2117   // Native nmethod wrappers never take possession of the oop arguments.
2118   // So the caller will gc the arguments.
2119   // The only thing we need an oopMap for is if the call is static.
2120   //
2121   // An OopMap for lock (and class if static), and one for the VM call itself.
2122   OopMapSet *oop_maps = new OopMapSet();
2123   OopMap    *oop_map  = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2124 
2125   if (is_critical_native) {
2126     check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset,


< prev index next >