
src/hotspot/share/code/codeCache.cpp

  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm_io.h"
  27 #include "code/codeBlob.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/codeHeapState.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/dependencyContext.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/nmethod.hpp"
  35 #include "code/pcDesc.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "compiler/compileBroker.hpp"
  38 #include "compiler/oopMap.hpp"

  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "jfr/jfrEvents.hpp"
  41 #include "logging/log.hpp"
  42 #include "logging/logStream.hpp"
  43 #include "memory/allocation.inline.hpp"
  44 #include "memory/iterator.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "memory/universe.hpp"
  47 #include "oops/method.inline.hpp"
  48 #include "oops/objArrayOop.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "oops/verifyOopClosure.hpp"
  51 #include "runtime/arguments.hpp"
  52 #include "runtime/atomic.hpp"
  53 #include "runtime/deoptimization.hpp"
  54 #include "runtime/globals_extension.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/icache.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/mutexLocker.hpp"

 636 CodeBlob* CodeCache::find_blob(void* start) {
 637   CodeBlob* result = find_blob_unsafe(start);
 638   // We could potentially look up non_entrant methods
 639   guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
 640   return result;
 641 }
 642 
 643 // Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
 644 // what you are doing)
 645 CodeBlob* CodeCache::find_blob_unsafe(void* start) {
 646   // NMT can walk the stack before code cache is created
 647   if (_heaps != NULL) {
 648     CodeHeap* heap = get_code_heap_containing(start);
 649     if (heap != NULL) {
 650       return heap->find_blob_unsafe(start);
 651     }
 652   }
 653   return NULL;
 654 }
 655 
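Since find_blob_unsafe tolerates both a missing code cache and zombie methods, early callers such as NMT stack walking must check the result before using it. A minimal usage sketch, with 'pc' standing in for an arbitrary address (illustrative only, not part of this change):

    // 'pc' may or may not point into the code cache; a NULL result is normal.
    CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
    if (cb != NULL && cb->is_nmethod()) {
      // pc lies inside a compiled method, possibly a zombie one.
    }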

















 656 nmethod* CodeCache::find_nmethod(void* start) {
 657   CodeBlob* cb = find_blob(start);
 658   assert(cb->is_nmethod(), "did not find an nmethod");
 659   return (nmethod*)cb;
 660 }
 661 
 662 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 663   assert_locked_or_safepoint(CodeCache_lock);
 664   FOR_ALL_HEAPS(heap) {
 665     FOR_ALL_BLOBS(cb, *heap) {
 666       f(cb);
 667     }
 668   }
 669 }
 670 
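blobs_do applies a plain function pointer to every blob in every heap while CodeCache_lock is held (or at a safepoint). A hypothetical callback, purely illustrative, that could be passed to it:

    // Print the address of every adapter blob; called once per CodeBlob.
    static void print_if_adapter(CodeBlob* cb) {
      if (cb->is_adapter_blob()) {
        tty->print_cr("adapter blob at " PTR_FORMAT, p2i(cb));
      }
    }
    // ... CodeCache::blobs_do(print_if_adapter); under the lock ...
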
 671 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 672   assert_locked_or_safepoint(CodeCache_lock);
 673   NMethodIterator iter(NMethodIterator::all_blobs);
 674   while(iter.next()) {
 675     f(iter.method());

 759       if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
 760         break;
 761       }
 762     }
 763   }
 764 }
 765 
 766 // Delete exception caches that have been concurrently unlinked,
 767 // followed by a global handshake operation.
 768 void CodeCache::purge_exception_caches() {
 769   ExceptionCache* curr = _exception_cache_purge_list;
 770   while (curr != NULL) {
 771     ExceptionCache* next = curr->purge_list_next();
 772     delete curr;
 773     curr = next;
 774   }
 775   _exception_cache_purge_list = NULL;
 776 }
 777 
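The purge list consumed above is built lock-free: producers push unlinked entries with the compare-and-swap loop visible at the top of this hunk, and deletion only happens after a global handshake, so no reader can still hold a pointer. A minimal sketch of that push idiom, assuming a hypothetical Entry type with a purge-list link (not the exact code from this file):

    void push_to_purge_list(Entry* volatile* list, Entry* entry) {
      for (;;) {
        Entry* head = Atomic::load(list);
        entry->set_purge_list_next(head);     // link in front of the current head
        if (Atomic::cmpxchg(list, head, entry) == head) {
          break;                              // CAS succeeded; entry is published
        }
        // CAS failed: another thread pushed first; retry against the new head.
      }
    }
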
 778 uint8_t CodeCache::_unloading_cycle = 1;

 779 
 780 void CodeCache::increment_unloading_cycle() {
 781   // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
 782   // 0 is reserved for new methods.
 783   _unloading_cycle = (_unloading_cycle + 1) % 4;
 784   if (_unloading_cycle == 0) {
 785     _unloading_cycle = 1;
 786   }
 787 }
 788 
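Because the cycle is a 2-bit value with 0 reserved for new methods, repeated increments walk 1 -> 2 -> 3 -> 1 -> ... A standalone illustration of that arithmetic (not from the source):

    uint8_t cycle = 1;
    for (int i = 0; i < 6; i++) {
      cycle = (cycle + 1) % 4;   // wraps 3 -> 0
      if (cycle == 0) {
        cycle = 1;               // skip the value reserved for new methods
      }
      // cycle is always one of {1, 2, 3} here
    }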






 789 CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
 790   : _is_unloading_behaviour(is_alive)
 791 {
 792   _saved_behaviour = IsUnloadingBehaviour::current();
 793   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
 794   increment_unloading_cycle();
 795   DependencyContext::cleaning_start();
 796 }
 797 
 798 CodeCache::UnloadingScope::~UnloadingScope() {
 799   IsUnloadingBehaviour::set_current(_saved_behaviour);
 800   DependencyContext::cleaning_end();
 801 }
 802 
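UnloadingScope is a save/restore RAII guard: the constructor installs a temporary IsUnloadingBehaviour and starts the cleaning pass, and the destructor reinstates the saved behaviour no matter how the scope exits. A generic sketch of the idiom, with a hypothetical Setting type standing in for IsUnloadingBehaviour:

    class ScopedSetting {
      Setting* _saved;                     // value to reinstate on exit
     public:
      ScopedSetting(Setting* temporary) : _saved(Setting::current()) {
        Setting::set_current(temporary);   // active for the scope's lifetime
      }
      ~ScopedSetting() {
        Setting::set_current(_saved);      // restored even on early return
      }
    };
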
 803 void CodeCache::verify_oops() {
 804   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 805   VerifyOopClosure voc;
 806   NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
 807   while(iter.next()) {
 808     nmethod* nm = iter.method();

1092     // This includes methods whose inline caches point to old methods, so
1093     // inline cache clearing is unnecessary.
1094     if (nm->has_evol_metadata()) {
1095       nm->mark_for_deoptimization();
1096       add_to_old_table(nm);
1097       number_of_marked_CodeBlobs++;
1098     }
1099   }
1100 
1101   // Return the total count of nmethods marked for deoptimization; if zero, the caller
1102   // can skip deoptimization.
1103   return number_of_marked_CodeBlobs;
1104 }
1105 
1106 void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
1107   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1108   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1109   while(iter.next()) {
1110     CompiledMethod* nm = iter.method();
1111     if (!nm->method()->is_method_handle_intrinsic()) {
1112       nm->mark_for_deoptimization();


1113       if (nm->has_evol_metadata()) {
1114         add_to_old_table(nm);
1115       }
1116     }
1117   }
1118 }
1119 
1120 // Flushes compiled methods that depend on redefined classes and have already been
1121 // marked for deoptimization.
1122 void CodeCache::flush_evol_dependents() {
1123   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1124 
1125   // The CodeCache can only be updated by threads in the _thread_in_vm state, and
1126   // they are all stopped during the safepoint, so the CodeCache is safe to update
1127   // without holding the CodeCache_lock.
1128 
1129   // At least one nmethod has been marked for deoptimization
1130 
1131   Deoptimization::deoptimize_all_marked();
1132 }

1144   }
1145 }
1146 
1147 int CodeCache::mark_for_deoptimization(Method* dependee) {
1148   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1149   int number_of_marked_CodeBlobs = 0;
1150 
1151   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1152   while(iter.next()) {
1153     CompiledMethod* nm = iter.method();
1154     if (nm->is_dependent_on_method(dependee)) {
1155       ResourceMark rm;
1156       nm->mark_for_deoptimization();
1157       number_of_marked_CodeBlobs++;
1158     }
1159   }
1160 
1161   return number_of_marked_CodeBlobs;
1162 }
1163 
1164 void CodeCache::make_marked_nmethods_not_entrant() {
1165   assert_locked_or_safepoint(CodeCache_lock);
1166   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1167   while(iter.next()) {
1168     CompiledMethod* nm = iter.method();
1169     if (nm->is_marked_for_deoptimization()) {
1170       nm->make_not_entrant();














1171     }
1172   }
1173 }
1174 
1175 // Flushes compiled methods dependent on dependee.
1176 void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
1177   assert_lock_strong(Compile_lock);
1178 
1179   if (number_of_nmethods_with_dependencies() == 0) return;
1180 
1181   int marked = 0;
1182   if (dependee->is_linked()) {
1183     // Class initialization state change.
1184     KlassInitDepChange changes(dependee);
1185     marked = mark_for_deoptimization(changes);
1186   } else {
1187     // New class is loaded.
1188     NewKlassDepChange changes(dependee);
1189     marked = mark_for_deoptimization(changes);
1190   }

  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm_io.h"
  27 #include "code/codeBlob.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/codeHeapState.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/dependencyContext.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/nmethod.hpp"
  35 #include "code/pcDesc.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "compiler/compileBroker.hpp"
  38 #include "compiler/oopMap.hpp"
  39 #include "gc/shared/barrierSetNMethod.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/iterator.hpp"
  46 #include "memory/resourceArea.hpp"
  47 #include "memory/universe.hpp"
  48 #include "oops/method.inline.hpp"
  49 #include "oops/objArrayOop.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "oops/verifyOopClosure.hpp"
  52 #include "runtime/arguments.hpp"
  53 #include "runtime/atomic.hpp"
  54 #include "runtime/deoptimization.hpp"
  55 #include "runtime/globals_extension.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/icache.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/mutexLocker.hpp"

 637 CodeBlob* CodeCache::find_blob(void* start) {
 638   CodeBlob* result = find_blob_unsafe(start);
 639   // We could potentially look up non_entrant methods
 640   guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
 641   return result;
 642 }
 643 
 644 // Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
 645 // what you are doing)
 646 CodeBlob* CodeCache::find_blob_unsafe(void* start) {
 647   // NMT can walk the stack before code cache is created
 648   if (_heaps != NULL) {
 649     CodeHeap* heap = get_code_heap_containing(start);
 650     if (heap != NULL) {
 651       return heap->find_blob_unsafe(start);
 652     }
 653   }
 654   return NULL;
 655 }
 656 
 657 CodeBlob* CodeCache::patch_nop(NativePostCallNop* nop, void* pc, int& slot) {
 658   CodeBlob* cb = CodeCache::find_blob(pc);
 659   int oopmap_slot = cb->oop_maps()->find_slot_for_offset((intptr_t) pc - (intptr_t) cb->code_begin());
 660   intptr_t cbaddr = (intptr_t) cb;
 661   intptr_t offset = ((intptr_t) pc) - cbaddr;
 662 
 663   if (((oopmap_slot & 0xff) == oopmap_slot) && ((offset & 0xffffff) == offset)) {
 664     jint value = (oopmap_slot << 24) | (jint) offset;
 665     nop->patch(value);
 666     slot = oopmap_slot;
 667   } else {
 668     slot = -1;
 669     log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
 670   }
 671   return cb;
 672 }
 673 
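patch_nop packs two fields into the nop's 32-bit immediate: the oopmap slot in the high byte and the pc's offset within the blob in the low 24 bits; when either field does not fit, the nop is left unpatched and the slot is reported as -1. A hypothetical decoder for that layout (illustrative, not part of this change):

    // Split a patched post-call nop value back into its two fields.
    static void decode_post_call_nop(jint value, int* oopmap_slot, int* offset) {
      *oopmap_slot = (value >> 24) & 0xff;  // 8-bit oopmap slot index
      *offset      = value & 0xffffff;      // 24-bit offset from the CodeBlob
    }
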
 674 nmethod* CodeCache::find_nmethod(void* start) {
 675   CodeBlob* cb = find_blob(start);
 676   assert(cb->is_nmethod(), "did not find an nmethod");
 677   return (nmethod*)cb;
 678 }
 679 
 680 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 681   assert_locked_or_safepoint(CodeCache_lock);
 682   FOR_ALL_HEAPS(heap) {
 683     FOR_ALL_BLOBS(cb, *heap) {
 684       f(cb);
 685     }
 686   }
 687 }
 688 
 689 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 690   assert_locked_or_safepoint(CodeCache_lock);
 691   NMethodIterator iter(NMethodIterator::all_blobs);
 692   while(iter.next()) {
 693     f(iter.method());

 777       if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
 778         break;
 779       }
 780     }
 781   }
 782 }
 783 
 784 // Delete exception caches that have been concurrently unlinked,
 785 // followed by a global handshake operation.
 786 void CodeCache::purge_exception_caches() {
 787   ExceptionCache* curr = _exception_cache_purge_list;
 788   while (curr != NULL) {
 789     ExceptionCache* next = curr->purge_list_next();
 790     delete curr;
 791     curr = next;
 792   }
 793   _exception_cache_purge_list = NULL;
 794 }
 795 
 796 uint8_t CodeCache::_unloading_cycle = 1;
 797 uint64_t CodeCache::_marking_cycle = 0;
 798 
 799 void CodeCache::increment_unloading_cycle() {
 800   // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
 801   // 0 is reserved for new methods.
 802   _unloading_cycle = (_unloading_cycle + 1) % 4;
 803   if (_unloading_cycle == 0) {
 804     _unloading_cycle = 1;
 805   }
 806 }
 807 
 808 void CodeCache::increment_marking_cycle() {
 809   ++_marking_cycle;
 810   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
 811   bs_nm->arm_all_nmethods();
 812 }
 813 
 814 CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
 815   : _is_unloading_behaviour(is_alive)
 816 {
 817   _saved_behaviour = IsUnloadingBehaviour::current();
 818   IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
 819   increment_unloading_cycle();
 820   DependencyContext::cleaning_start();
 821 }
 822 
 823 CodeCache::UnloadingScope::~UnloadingScope() {
 824   IsUnloadingBehaviour::set_current(_saved_behaviour);
 825   DependencyContext::cleaning_end();
 826 }
 827 
 828 void CodeCache::verify_oops() {
 829   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 830   VerifyOopClosure voc;
 831   NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
 832   while(iter.next()) {
 833     nmethod* nm = iter.method();

1117     // This includes methods whose inline caches point to old methods, so
1118     // inline cache clearing is unnecessary.
1119     if (nm->has_evol_metadata()) {
1120       nm->mark_for_deoptimization();
1121       add_to_old_table(nm);
1122       number_of_marked_CodeBlobs++;
1123     }
1124   }
1125 
1126   // Return the total count of nmethods marked for deoptimization; if zero, the caller
1127   // can skip deoptimization.
1128   return number_of_marked_CodeBlobs;
1129 }
1130 
1131 void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
1132   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1133   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1134   while(iter.next()) {
1135     CompiledMethod* nm = iter.method();
1136     if (!nm->method()->is_method_handle_intrinsic()) {
1137       if (nm->can_be_deoptimized()) {
1138         nm->mark_for_deoptimization();
1139       }
1140       if (nm->has_evol_metadata()) {
1141         add_to_old_table(nm);
1142       }
1143     }
1144   }
1145 }
1146 
1147 // Flushes compiled methods that depend on redefined classes and have already been
1148 // marked for deoptimization.
1149 void CodeCache::flush_evol_dependents() {
1150   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1151 
1152   // The CodeCache can only be updated by threads in the _thread_in_vm state, and
1153   // they are all stopped during the safepoint, so the CodeCache is safe to update
1154   // without holding the CodeCache_lock.
1155 
1156   // At least one nmethod has been marked for deoptimization
1157 
1158   Deoptimization::deoptimize_all_marked();
1159 }

1171   }
1172 }
1173 
1174 int CodeCache::mark_for_deoptimization(Method* dependee) {
1175   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1176   int number_of_marked_CodeBlobs = 0;
1177 
1178   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1179   while(iter.next()) {
1180     CompiledMethod* nm = iter.method();
1181     if (nm->is_dependent_on_method(dependee)) {
1182       ResourceMark rm;
1183       nm->mark_for_deoptimization();
1184       number_of_marked_CodeBlobs++;
1185     }
1186   }
1187 
1188   return number_of_marked_CodeBlobs;
1189 }
1190 
1191 void CodeCache::make_marked_nmethods_not_entrant(GrowableArray<CompiledMethod*>* marked) {
1192   assert_locked_or_safepoint(CodeCache_lock);
1193   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1194   while(iter.next()) {
1195     CompiledMethod* nm = iter.method();
1196     if (nm->is_marked_for_deoptimization()) {
1197       if (!nm->make_not_entrant()) {
1198         // If the method was already not entrant, run the entry barrier to keep
1199         // it from becoming a zombie before deoptimization, even without a safepoint.
1200         nm->run_nmethod_entry_barrier();
1201       }
1202       marked->append(nm);
1203     }
1204   }
1205 }
1206 
1207 void CodeCache::make_marked_nmethods_deoptimized(GrowableArray<CompiledMethod*>* marked) {
1208   for (int i = 0; i < marked->length(); i++) {
1209     CompiledMethod* nm = marked->at(i);
1210     if (nm->is_marked_for_deoptimization() && nm->can_be_deoptimized()) {
1211       nm->make_deoptimized();
1212     }
1213   }
1214 }
1215 
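Together, the two functions above form a two-phase flow under the new signatures: first make every marked method not entrant while collecting it, then deoptimize the collected set. A hedged sketch of how a caller might chain them (the real call sites and their safepoint/locking protocol live elsewhere in HotSpot):

    ResourceMark rm;
    GrowableArray<CompiledMethod*> marked_nmethods;
    // Phase 1: disable the marked methods and collect them.
    CodeCache::make_marked_nmethods_not_entrant(&marked_nmethods);
    // Phase 2: deoptimize the frames of the collected methods.
    CodeCache::make_marked_nmethods_deoptimized(&marked_nmethods);
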
1216 // Flushes compiled methods dependent on dependee.
1217 void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
1218   assert_lock_strong(Compile_lock);
1219 
1220   if (number_of_nmethods_with_dependencies() == 0) return;
1221 
1222   int marked = 0;
1223   if (dependee->is_linked()) {
1224     // Class initialization state change.
1225     KlassInitDepChange changes(dependee);
1226     marked = mark_for_deoptimization(changes);
1227   } else {
1228     // New class is loaded.
1229     NewKlassDepChange changes(dependee);
1230     marked = mark_for_deoptimization(changes);
1231   }