src/share/vm/c1/c1_Runtime1.cpp

 940       nm->make_not_entrant();
 941     }
 942 
 943     Deoptimization::deoptimize_frame(thread, caller_frame.id());
 944 
 945     // Return to the now deoptimized frame.
 946   }
 947 
 948   // Now copy code back
 949 
 950   {
 951     MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
 952     //
 953     // Deoptimization may have happened while we waited for the lock.
 954     // In that case we don't bother to do any patching; we just return
 955     // and let the deopt happen.
 956     if (!caller_is_deopted()) {
 957       NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
 958       address instr_pc = jump->jump_destination();
 959       NativeInstruction* ni = nativeInstruction_at(instr_pc);
 960       if (ni->is_jump() ) {
 961         // The jump has not been patched yet.
 962         // The jump destination is the slow case and therefore not part of the stubs
 963         // (stubs are only for StaticCalls)
 964 
 965         // format of buffer
 966         //    ....
 967         //    instr byte 0     <-- copy_buff
 968         //    instr byte 1
 969         //    ..
 970         //    instr byte n-1
 971         //      n
 972         //    ....             <-- call destination
 973 
 974         address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
 975         unsigned char* byte_count = (unsigned char*) (stub_location - 1);
 976         unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
 977         unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
 978         address copy_buff = stub_location - *byte_skip - *byte_count;
 979         address being_initialized_entry = stub_location - *being_initialized_entry_offset;
 980         if (TracePatching) {


1068               oop_Relocation* r = mds.oop_reloc();
1069               oop* oop_adr = r->oop_addr();
1070               *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
1071               r->fix_oop_relocation();
1072               found = true;
1073             } else if (mds.type() == relocInfo::metadata_type) {
1074               assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1075               metadata_Relocation* r = mds.metadata_reloc();
1076               Metadata** metadata_adr = r->metadata_addr();
1077               *metadata_adr = load_klass();
1078               r->fix_metadata_relocation();
1079               found = true;
1080             }
1081           }
1082           assert(found, "the metadata must exist!");
1083         }
1084 #endif
1085         if (do_patch) {
1086           // replace instructions
1087           // first replace the tail, then the call
1088 #ifdef ARM
1089           if((load_klass_or_mirror_patch_id ||
1090               stub_id == Runtime1::load_appendix_patching_id) &&
1091               nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
1092             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1093             address addr = NULL;
1094             assert(nm != NULL, "invalid nmethod_pc");
1095             RelocIterator mds(nm, copy_buff, copy_buff + 1);
1096             while (mds.next()) {
1097               if (mds.type() == relocInfo::oop_type) {
1098                 assert(stub_id == Runtime1::load_mirror_patching_id ||
1099                        stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
1100                 oop_Relocation* r = mds.oop_reloc();
1101                 addr = (address)r->oop_addr();
1102                 break;
1103               } else if (mds.type() == relocInfo::metadata_type) {
1104                 assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1105                 metadata_Relocation* r = mds.metadata_reloc();
1106                 addr = (address)r->metadata_addr();
1107                 break;
1108               }


1137             // the reloc info so that it will get updated during
1138             // future GCs.
1139             RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
1140             relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
1141                                                      relocInfo::none, rtype);
1142 #ifdef SPARC
1143             // Sparc takes two relocations for a metadata reference, so update the second one.
1144             address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
1145             RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
1146             relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
1147                                                      relocInfo::none, rtype);
1148 #endif
1149 #ifdef PPC
1150           { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
1151             RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
1152             relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
1153                                                      relocInfo::none, rtype);
1154           }
1155 #endif
1156           }
1157 
1158         } else {
1159           ICache::invalidate_range(copy_buff, *byte_count);
1160           NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
1161         }
1162       }
1163     }
1164   }
1165 
1166   // If we are patching in a non-perm oop, make sure the nmethod
1167   // is on the right list.
1168   if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
1169                               (appendix.not_null() && appendix->is_scavengable()))) {
1170     MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
1171     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1172     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
1173     if (!nm->on_scavenge_root_list()) {
1174       CodeCache::add_scavenge_root_nmethod(nm);
1175     }
1176 




 940       nm->make_not_entrant();
 941     }
 942 
 943     Deoptimization::deoptimize_frame(thread, caller_frame.id());
 944 
 945     // Return to the now deoptimized frame.
 946   }
 947 
 948   // Now copy code back
 949 
 950   {
 951     MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
 952     //
 953     // Deoptimization may have happened while we waited for the lock.
 954     // In that case we don't bother to do any patching; we just return
 955     // and let the deopt happen.
 956     if (!caller_is_deopted()) {
 957       NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
 958       address instr_pc = jump->jump_destination();
 959       NativeInstruction* ni = nativeInstruction_at(instr_pc);
 960       if (ni->is_jump()) {
 961         // The jump has not been patched yet.
 962         // The jump destination is the slow case and therefore not part of the stubs
 963         // (stubs are only for StaticCalls)
 964 
 965         // format of buffer
 966         //    ....
 967         //    instr byte 0     <-- copy_buff
 968         //    instr byte 1
 969         //    ..
 970         //    instr byte n-1
 971         //      n
 972         //    ....             <-- call destination
 973 
 974         address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
 975         unsigned char* byte_count = (unsigned char*) (stub_location - 1);
 976         unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
 977         unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
 978         address copy_buff = stub_location - *byte_skip - *byte_count;
 979         address being_initialized_entry = stub_location - *being_initialized_entry_offset;
 980         if (TracePatching) {


1068               oop_Relocation* r = mds.oop_reloc();
1069               oop* oop_adr = r->oop_addr();
1070               *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
1071               r->fix_oop_relocation();
1072               found = true;
1073             } else if (mds.type() == relocInfo::metadata_type) {
1074               assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1075               metadata_Relocation* r = mds.metadata_reloc();
1076               Metadata** metadata_adr = r->metadata_addr();
1077               *metadata_adr = load_klass();
1078               r->fix_metadata_relocation();
1079               found = true;
1080             }
1081           }
1082           assert(found, "the metadata must exist!");
1083         }
1084 #endif
1085         if (do_patch) {
1086           // replace instructions
1087           // first replace the tail, then the call
1088 #if defined(ARM) && !defined(AARCH32)
1089           if((load_klass_or_mirror_patch_id ||
1090               stub_id == Runtime1::load_appendix_patching_id) &&
1091               nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
1092             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1093             address addr = NULL;
1094             assert(nm != NULL, "invalid nmethod_pc");
1095             RelocIterator mds(nm, copy_buff, copy_buff + 1);
1096             while (mds.next()) {
1097               if (mds.type() == relocInfo::oop_type) {
1098                 assert(stub_id == Runtime1::load_mirror_patching_id ||
1099                        stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
1100                 oop_Relocation* r = mds.oop_reloc();
1101                 addr = (address)r->oop_addr();
1102                 break;
1103               } else if (mds.type() == relocInfo::metadata_type) {
1104                 assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1105                 metadata_Relocation* r = mds.metadata_reloc();
1106                 addr = (address)r->metadata_addr();
1107                 break;
1108               }


1137             // the reloc info so that it will get updated during
1138             // future GCs.
1139             RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
1140             relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
1141                                                      relocInfo::none, rtype);
1142 #ifdef SPARC
1143             // Sparc takes two relocations for a metadata reference, so update the second one.
1144             address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
1145             RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
1146             relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
1147                                                      relocInfo::none, rtype);
1148 #endif
1149 #ifdef PPC
1150           { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
1151             RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
1152             relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
1153                                                      relocInfo::none, rtype);
1154           }
1155 #endif
1156           }
1157 #ifdef AARCH32
1158           // AArch32 has a (disabled) relocation for the offset; re-enable it here
1159           if (stub_id == Runtime1::access_field_patching_id) {
1160             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1161             RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
1162             relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
1163                                                      relocInfo::none, relocInfo::section_word_type);
1164           }
1165 #endif
1166 
1167         } else {
1168           ICache::invalidate_range(copy_buff, *byte_count);
1169           NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
1170         }
1171       }
1172     }
1173   }
1174 
1175   // If we are patching in a non-perm oop, make sure the nmethod
1176   // is on the right list.
1177   if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
1178                               (appendix.not_null() && appendix->is_scavengable()))) {
1179     MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
1180     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1181     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
1182     if (!nm->on_scavenge_root_list()) {
1183       CodeCache::add_scavenge_root_nmethod(nm);
1184     }
1185 

